From efe1135d2841d219a88a7f3c97e399d741bf1137 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 9 Feb 2024 16:55:30 +0100 Subject: [PATCH 001/598] Update Reply protocol definition and codec (#717) * Update Reply protocol definition and codec * Make consolidation a flag in Query/Reply * Fix wrong Consolidation cast in codec * Apply Reply changes to routing * Fix shared-memory feature * Fix stats * Bump Zenoh Protocol Version * Add query/reply ok(put|del)/err() tests --- commons/zenoh-codec/src/zenoh/mod.rs | 4 +- commons/zenoh-codec/src/zenoh/query.rs | 70 +++++----- commons/zenoh-codec/src/zenoh/reply.rs | 141 +++----------------- commons/zenoh-codec/tests/codec.rs | 2 +- commons/zenoh-protocol/src/lib.rs | 2 +- commons/zenoh-protocol/src/zenoh/mod.rs | 10 +- commons/zenoh-protocol/src/zenoh/query.rs | 25 ++-- commons/zenoh-protocol/src/zenoh/reply.rs | 90 +++---------- io/zenoh-transport/src/common/stats.rs | 8 ++ io/zenoh-transport/src/shm.rs | 17 +-- zenoh/src/net/routing/dispatcher/pubsub.rs | 13 +- zenoh/src/net/routing/dispatcher/queries.rs | 54 +++++--- zenoh/src/queryable.rs | 81 ++++++----- zenoh/src/session.rs | 74 +++++++--- zenoh/tests/session.rs | 71 +++++++++- 15 files changed, 329 insertions(+), 333 deletions(-) diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index 2e3ea48be7..d59add9d63 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -121,8 +121,8 @@ where fn write(self, writer: &mut W, x: &ResponseBody) -> Self::Output { match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), - ResponseBody::Err(b) => self.write(&mut *writer, b), ResponseBody::Ack(b) => self.write(&mut *writer, b), + ResponseBody::Err(b) => self.write(&mut *writer, b), ResponseBody::Put(b) => self.write(&mut *writer, b), } } @@ -140,8 +140,8 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::REPLY => 
ResponseBody::Reply(codec.read(&mut *reader)?), - id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), id::ACK => ResponseBody::Ack(codec.read(&mut *reader)?), + id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index 09b01b2266..cb0506e474 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -22,48 +22,46 @@ use zenoh_protocol::{ common::{iext, imsg}, zenoh::{ id, - query::{ext, flag, Query}, + query::{ext, flag, Consolidation, Query}, }, }; -// Extension Consolidation -impl WCodec<(ext::ConsolidationType, bool), &mut W> for Zenoh080 +// Consolidation +impl WCodec for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (ext::ConsolidationType, bool)) -> Self::Output { - let (x, more) = x; + fn write(self, writer: &mut W, x: Consolidation) -> Self::Output { let v: u64 = match x { - ext::ConsolidationType::Auto => 0, - ext::ConsolidationType::None => 1, - ext::ConsolidationType::Monotonic => 2, - ext::ConsolidationType::Latest => 3, - ext::ConsolidationType::Unique => 4, + Consolidation::Auto => 0, + Consolidation::None => 1, + Consolidation::Monotonic => 2, + Consolidation::Latest => 3, + Consolidation::Unique => 4, }; - let v = ext::Consolidation::new(v); - self.write(&mut *writer, (&v, more)) + self.write(&mut *writer, v) } } -impl RCodec<(ext::ConsolidationType, bool), &mut R> for Zenoh080Header +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::ConsolidationType, bool), Self::Error> { - let (ext, more): (ext::Consolidation, bool) = self.read(&mut *reader)?; - let c = match ext.value { - 0 => ext::ConsolidationType::Auto, - 1 => ext::ConsolidationType::None, - 2 => ext::ConsolidationType::Monotonic, - 3 => 
ext::ConsolidationType::Latest, - 4 => ext::ConsolidationType::Unique, - _ => return Err(DidntRead), + fn read(self, reader: &mut R) -> Result { + let v: u64 = self.read(&mut *reader)?; + let c = match v { + 0 => Consolidation::Auto, + 1 => Consolidation::None, + 2 => Consolidation::Monotonic, + 3 => Consolidation::Latest, + 4 => Consolidation::Unique, + _ => Consolidation::Auto, // Fallback on Auto if Consolidation is unknown }; - Ok((c, more)) + Ok(c) } } @@ -75,9 +73,9 @@ where fn write(self, writer: &mut W, x: &Query) -> Self::Output { let Query { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, @@ -85,11 +83,13 @@ where // Header let mut header = id::QUERY; + if consolidation != &Consolidation::default() { + header |= flag::C; + } if !parameters.is_empty() { header |= flag::P; } let mut n_exts = (ext_sinfo.is_some() as u8) - + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + (ext_body.is_some() as u8) + (ext_attachment.is_some() as u8) + (ext_unknown.len() as u8); @@ -99,6 +99,9 @@ where self.write(&mut *writer, header)?; // Body + if consolidation != &Consolidation::default() { + self.write(&mut *writer, *consolidation)?; + } if !parameters.is_empty() { self.write(&mut *writer, parameters)?; } @@ -108,10 +111,6 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if ext_consolidation != &ext::ConsolidationType::default() { - n_exts -= 1; - self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; - } if let Some(body) = ext_body.as_ref() { n_exts -= 1; self.write(&mut *writer, (body, n_exts != 0))?; @@ -154,6 +153,11 @@ where } // Body + let mut consolidation = Consolidation::default(); + if imsg::has_flag(self.header, flag::C) { + consolidation = self.codec.read(&mut *reader)?; + } + let mut parameters = String::new(); if imsg::has_flag(self.header, flag::P) { parameters = self.codec.read(&mut *reader)?; @@ -161,7 +165,6 @@ where // Extensions let mut ext_sinfo: 
Option = None; - let mut ext_consolidation = ext::ConsolidationType::default(); let mut ext_body: Option = None; let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); @@ -176,11 +179,6 @@ where ext_sinfo = Some(s); has_ext = ext; } - ext::Consolidation::ID => { - let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; - ext_consolidation = c; - has_ext = ext; - } ext::QueryBodyType::SID | ext::QueryBodyType::VID => { let (s, ext): (ext::QueryBodyType, bool) = eodec.read(&mut *reader)?; ext_body = Some(s); @@ -200,9 +198,9 @@ where } Ok(Query { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index d98c72b341..d54e98cc5e 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -11,23 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, - ZBuf, }; use zenoh_protocol::{ - common::{iext, imsg}, - core::Encoding, + common::imsg, zenoh::{ id, - reply::{ext, flag, Reply}, + query::Consolidation, + reply::{flag, Reply, ReplyBody}, }, }; @@ -39,81 +34,35 @@ where fn write(self, writer: &mut W, x: &Reply) -> Self::Output { let Reply { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, } = x; // Header let mut header = id::REPLY; - if timestamp.is_some() { - header |= flag::T; - } - if encoding != &Encoding::default() { - header |= flag::E; - } - let mut n_exts = (ext_sinfo.is_some()) as u8 - + ((ext_consolidation != 
&ext::ConsolidationType::default()) as u8) - + (ext_attachment.is_some()) as u8 - + (ext_unknown.len() as u8); - #[cfg(feature = "shared-memory")] - { - n_exts += ext_shm.is_some() as u8; + if consolidation != &Consolidation::default() { + header |= flag::C; } + let mut n_exts = ext_unknown.len() as u8; if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; - } - if encoding != &Encoding::default() { - self.write(&mut *writer, encoding)?; + if consolidation != &Consolidation::default() { + self.write(&mut *writer, *consolidation)?; } // Extensions - if let Some(sinfo) = ext_sinfo.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (sinfo, n_exts != 0))?; - } - if ext_consolidation != &ext::ConsolidationType::default() { - n_exts -= 1; - self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; - } - #[cfg(feature = "shared-memory")] - if let Some(eshm) = ext_shm.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (eshm, n_exts != 0))?; - } - if let Some(att) = ext_attachment.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (att, n_exts != 0))?; - } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } // Payload - #[cfg(feature = "shared-memory")] - { - let codec = Zenoh080Sliced::::new(ext_shm.is_some()); - codec.write(&mut *writer, payload)?; - } - - #[cfg(not(feature = "shared-memory"))] - { - let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, payload)?; - } + self.write(&mut *writer, payload)?; Ok(()) } @@ -144,81 +93,27 @@ where } // Body - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); - } - - let mut encoding = Encoding::default(); - if imsg::has_flag(self.header, flag::E) { - encoding = self.codec.read(&mut *reader)?; + let mut consolidation = Consolidation::default(); + if imsg::has_flag(self.header, flag::C) { + 
consolidation = self.codec.read(&mut *reader)?; } // Extensions - let mut ext_sinfo: Option = None; - let mut ext_consolidation = ext::ConsolidationType::default(); - #[cfg(feature = "shared-memory")] - let mut ext_shm: Option = None; - let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - ext::SourceInfo::ID => { - let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; - ext_sinfo = Some(s); - has_ext = ext; - } - ext::Consolidation::ID => { - let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; - ext_consolidation = c; - has_ext = ext; - } - #[cfg(feature = "shared-memory")] - ext::Shm::ID => { - let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; - ext_shm = Some(s); - has_ext = ext; - } - ext::Attachment::ID => { - let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; - ext_attachment = Some(a); - has_ext = ext; - } - _ => { - let (u, ext) = extension::read(reader, "Reply", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - } + let (u, ext) = extension::read(reader, "Reply", ext)?; + ext_unknown.push(u); + has_ext = ext; } // Payload - let payload: ZBuf = { - #[cfg(feature = "shared-memory")] - { - let codec = Zenoh080Sliced::::new(ext_shm.is_some()); - codec.read(&mut *reader)? - } - - #[cfg(not(feature = "shared-memory"))] - { - let bodec = Zenoh080Bounded::::new(); - bodec.read(&mut *reader)? 
- } - }; + let payload: ReplyBody = self.codec.read(&mut *reader)?; Ok(Reply { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 3fdb95e1b5..28201c1977 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -556,7 +556,7 @@ fn codec_network() { run!(NetworkMessage, NetworkMessage::rand()); } -// Zenoh new +// Zenoh #[test] fn codec_put() { run!(zenoh::Put, zenoh::Put::rand()); diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 2e1a2fa7cf..8d26f52ed9 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -28,7 +28,7 @@ pub mod transport; pub mod zenoh; // Zenoh version -pub const VERSION: u8 = 0x08; +pub const VERSION: u8 = 0x09; // Zenoh protocol uses the following conventions for message definition and representation. 
// diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index e67576e673..a23eaa9b21 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -95,10 +95,11 @@ impl RequestBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..3) { + match rng.gen_range(0..4) { 0 => RequestBody::Query(Query::rand()), 1 => RequestBody::Put(Put::rand()), 2 => RequestBody::Del(Del::rand()), + 3 => RequestBody::Pull(Pull::rand()), _ => unreachable!(), } } @@ -126,8 +127,8 @@ impl From for RequestBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), - Err(Err), Ack(Ack), + Err(Err), Put(Put), } @@ -135,13 +136,12 @@ impl ResponseBody { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; - let mut rng = rand::thread_rng(); match rng.gen_range(0..4) { 0 => ResponseBody::Reply(Reply::rand()), - 1 => ResponseBody::Err(Err::rand()), - 2 => ResponseBody::Ack(Ack::rand()), + 1 => ResponseBody::Ack(Ack::rand()), + 2 => ResponseBody::Err(Err::rand()), 3 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index 7432840492..17dfa23df8 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -69,50 +69,45 @@ impl From for Consolidation { /// /// ```text /// Flags: +/// - C: Consolidation if C==1 then consolidation is present /// - P: Parameters If P==1 then the parameters are present -/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|P| QUERY | +/// |Z|P|C| QUERY | /// +-+-+-+---------+ +/// % consolidation % if C==1 +/// +---------------+ /// ~ ps: ~ if P==1 /// +---------------+ /// ~ [qry_exts] ~ if Z==1 /// +---------------+ /// ``` pub mod flag { - pub const P: u8 = 1 << 5; // 0x20 Parameters if P==1 then the 
parameters are present - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present + pub const P: u8 = 1 << 6; // 0x40 Parameters if P==1 then the parameters are present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Query { + pub consolidation: Consolidation, pub parameters: String, pub ext_sinfo: Option, - pub ext_consolidation: Consolidation, pub ext_body: Option, pub ext_attachment: Option, pub ext_unknown: Vec, } pub mod ext { - use crate::{ - common::{ZExtZ64, ZExtZBuf}, - zextz64, zextzbuf, - }; + use crate::{common::ZExtZBuf, zextzbuf}; /// # SourceInfo extension /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - /// # Consolidation extension - pub type Consolidation = zextz64!(0x2, true); - pub type ConsolidationType = crate::zenoh::query::Consolidation; - /// # QueryBody extension /// Used to carry a body attached to the query /// Shared Memory extension is automatically defined by ValueType extension if @@ -137,6 +132,7 @@ impl Query { const MIN: usize = 2; const MAX: usize = 16; + let consolidation = Consolidation::rand(); let parameters: String = if rng.gen_bool(0.5) { let len = rng.gen_range(MIN..MAX); Alphanumeric.sample_string(&mut rng, len) @@ -144,7 +140,6 @@ impl Query { String::new() }; let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_consolidation = Consolidation::rand(); let ext_body = rng.gen_bool(0.5).then_some(ext::QueryBodyType::rand()); let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); @@ -156,9 +151,9 @@ impl Query { } Self { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git 
a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 2395e1e9b2..7cbab4ca0a 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -11,115 +11,61 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; +use crate::{ + common::ZExtUnknown, + zenoh::{query::Consolidation, PushBody}, +}; use alloc::vec::Vec; -use uhlc::Timestamp; -use zenoh_buffers::ZBuf; /// # Reply message /// /// ```text /// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - E: Encoding If E==1 then the encoding is present +/// - C: Consolidation if C==1 then consolidation is present +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|E|T| REPLY | +/// |Z|X|C| REPLY | /// +-+-+-+---------+ -/// ~ ts: ~ if T==1 -/// +---------------+ -/// ~ encoding ~ if E==1 +/// % consolidation % if C==1 /// +---------------+ /// ~ [repl_exts] ~ if Z==1 /// +---------------+ -/// ~ pl: ~ -- Payload +/// ~ ReplyBody ~ -- Payload /// +---------------+ /// ``` pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present + pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Reply { - pub timestamp: Option, - pub encoding: Encoding, - pub ext_sinfo: Option, - pub ext_consolidation: ext::ConsolidationType, - #[cfg(feature = "shared-memory")] - pub ext_shm: Option, - pub ext_attachment: Option, + pub consolidation: Consolidation, pub ext_unknown: Vec, - pub payload: ZBuf, + pub payload: ReplyBody, } -pub mod ext { - #[cfg(feature = "shared-memory")] - 
use crate::{common::ZExtUnit, zextunit}; - use crate::{ - common::{ZExtZ64, ZExtZBuf}, - zextz64, zextzbuf, - }; - - /// # SourceInfo extension - /// Used to carry additional information about the source of data - pub type SourceInfo = zextzbuf!(0x1, false); - pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - - /// # Consolidation extension - pub type Consolidation = zextz64!(0x2, true); - pub type ConsolidationType = crate::zenoh::query::ext::ConsolidationType; - - /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data - #[cfg(feature = "shared-memory")] - pub type Shm = zextunit!(0x3, true); - #[cfg(feature = "shared-memory")] - pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; - - /// # User attachment - pub type Attachment = zextzbuf!(0x4, false); - pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; -} +pub type ReplyBody = PushBody; impl Reply { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId, zenoh::Consolidation}; use rand::Rng; let mut rng = rand::thread_rng(); - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); - let encoding = Encoding::rand(); - let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_consolidation = Consolidation::rand(); - #[cfg(feature = "shared-memory")] - let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); - let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); + let payload = ReplyBody::rand(); + let consolidation = Consolidation::rand(); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::Attachment::ID) + 1, - false, - )); + ext_unknown.push(ZExtUnknown::rand2(1, false)); } - let payload = 
ZBuf::rand(rng.gen_range(1..=64)); Self { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, } diff --git a/io/zenoh-transport/src/common/stats.rs b/io/zenoh-transport/src/common/stats.rs index f095a58273..aaf39641c0 100644 --- a/io/zenoh-transport/src/common/stats.rs +++ b/io/zenoh-transport/src/common/stats.rs @@ -208,6 +208,10 @@ stats_struct! { # TYPE "counter" pub tx_z_del_msgs DiscriminatedStats, + # HELP "Counter of sent bytes in zenoh del message attachments." + # TYPE "counter" + pub tx_z_del_pl_bytes DiscriminatedStats, + # HELP "Counter of sent zenoh query messages." # TYPE "counter" pub tx_z_query_msgs DiscriminatedStats, @@ -252,6 +256,10 @@ stats_struct! { # TYPE "counter" pub rx_z_del_msgs DiscriminatedStats, + # HELP "Counter of received bytes in zenoh del message attachments." + # TYPE "counter" + pub rx_z_del_pl_bytes DiscriminatedStats, + # HELP "Counter of received zenoh query messages." # TYPE "counter" pub rx_z_query_msgs DiscriminatedStats, diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 04a8f502c4..8b0e93f494 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -21,6 +21,7 @@ use zenoh_protocol::{ err::{ext::ErrBodyType, Err}, ext::ShmType, query::{ext::QueryBodyType, Query}, + reply::ReplyBody, PushBody, Put, Reply, RequestBody, ResponseBody, }, }; @@ -105,17 +106,17 @@ impl MapShm for Query { // Impl - Reply impl MapShm for Reply { fn map_to_shminfo(&mut self) -> ZResult { - let Self { - payload, ext_shm, .. - } = self; - map_to_shminfo!(payload, ext_shm) + match &mut self.payload { + ReplyBody::Put(b) => b.map_to_shminfo(), + _ => Ok(false), + } } fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { - let Self { - payload, ext_shm, ..
- } = self; - map_to_shmbuf!(payload, ext_shm, shmr) + match &mut self.payload { + ReplyBody::Put(b) => b.map_to_shmbuf(shmr), + _ => Ok(false), + } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index da6ae0c371..ffe2d3ccca 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -413,10 +413,19 @@ macro_rules! inc_stats { match &$body { PushBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + let mut n = p.payload.len(); + if let Some(a) = p.ext_attachment.as_ref() { + n += a.buffer.len(); + } + stats.[<$txrx _z_put_pl_bytes>].[](n); } - PushBody::Del(_) => { + PushBody::Del(d) => { stats.[<$txrx _z_del_msgs>].[](1); + let mut n = 0; + if let Some(a) = d.ext_attachment.as_ref() { + n += a.buffer.len(); + } + stats.[<$txrx _z_del_pl_bytes>].[](n); } } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 9645af0f74..a6748650ab 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -21,16 +21,16 @@ use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; -use zenoh_protocol::core::key_expr::keyexpr; -use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_protocol::zenoh::reply::ReplyBody; +use zenoh_protocol::zenoh::Put; use zenoh_protocol::{ - core::{Encoding, WireExpr}, + core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ - declare::ext, + declare::{ext, queryable::ext::QueryableInfo}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, - zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, + zenoh::{query::Consolidation, Reply, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; 
@@ -464,11 +464,29 @@ macro_rules! inc_res_stats { match &$body { ResponseBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + let mut n = p.payload.len(); + if let Some(a) = p.ext_attachment.as_ref() { + n += a.buffer.len(); + } + stats.[<$txrx _z_put_pl_bytes>].[](n); } ResponseBody::Reply(r) => { stats.[<$txrx _z_reply_msgs>].[](1); - stats.[<$txrx _z_reply_pl_bytes>].[](r.payload.len()); + let mut n = 0; + match &r.payload { + ReplyBody::Put(p) => { + if let Some(a) = p.ext_attachment.as_ref() { + n += a.buffer.len(); + } + n += p.payload.len(); + } + ReplyBody::Del(d) => { + if let Some(a) = d.ext_attachment.as_ref() { + n += a.buffer.len(); + } + } + } + stats.[<$txrx _z_reply_pl_bytes>].[](n); } ResponseBody::Err(e) => { stats.[<$txrx _z_reply_msgs>].[](1); @@ -537,15 +555,19 @@ pub fn route_query( for (wexpr, payload) in local_replies { let payload = ResponseBody::Reply(Reply { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - ext_consolidation: ConsolidationType::default(), - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: None, // @TODO: expose it in the API - ext_unknown: vec![], - payload, + consolidation: Consolidation::default(), // @TODO: handle Del case + ext_unknown: vec![], // @TODO: handle unknown extensions + payload: ReplyBody::Put(Put { + // @TODO: handle Del case + timestamp: None, // @TODO: handle timestamp + encoding: Encoding::default(), // @TODO: handle encoding + ext_sinfo: None, // @TODO: handle source info + ext_attachment: None, // @TODO: expose it in the API + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], // @TODO: handle unknown extensions + payload, + }), }); #[cfg(feature = "stats")] if !admin { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 9ee73d1641..4e9f4914dd 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -30,11 +30,11 @@ use std::future::Ready; use 
std::ops::Deref; use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::core::WireExpr; -use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; -use zenoh_protocol::zenoh::ext::ValueType; -use zenoh_protocol::zenoh::reply::ext::ConsolidationType; -use zenoh_protocol::zenoh::{self, ResponseBody}; +use zenoh_protocol::{ + core::WireExpr, + network::{response, Mapping, RequestId, Response, ResponseFinal}, + zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}, +}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -206,16 +206,33 @@ impl SyncResolve for ReplyBuilder<'_> { source_id: None, source_sn: None, }; - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - data_info.source_id = source_info.source_id; - data_info.source_sn = source_info.source_sn; - if let Some(attachment) = attachment { - ext_attachment = Some(attachment.into()); - } + + // Use a macro for inferring the proper const extension ID between Put and Del cases + macro_rules! 
ext_attachment { + () => {{ + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + data_info.source_id = source_info.source_id; + data_info.source_sn = source_info.source_sn; + if let Some(attachment) = attachment { + ext_attachment = Some(attachment.into()); + } + } + ext_attachment + }}; } + + let ext_sinfo = if data_info.source_id.is_some() || data_info.source_sn.is_some() { + Some(zenoh::put::ext::SourceInfoType { + zid: data_info.source_id.unwrap_or_default(), + eid: 0, // @TODO use proper EntityId (#703) + sn: data_info.source_sn.unwrap_or_default() as u32, + }) + } else { + None + }; self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -224,24 +241,26 @@ impl SyncResolve for ReplyBuilder<'_> { mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { - timestamp: data_info.timestamp, - encoding: data_info.encoding.unwrap_or_default(), - ext_sinfo: if data_info.source_id.is_some() || data_info.source_sn.is_some() - { - Some(zenoh::reply::ext::SourceInfoType { - zid: data_info.source_id.unwrap_or_default(), - eid: 0, // @TODO use proper EntityId (#703) - sn: data_info.source_sn.unwrap_or_default() as u32, - }) - } else { - None - }, - ext_consolidation: ConsolidationType::default(), - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment, + consolidation: zenoh::Consolidation::default(), ext_unknown: vec![], - payload, + payload: match kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: data_info.timestamp, + encoding: data_info.encoding.unwrap_or_default(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: ext_attachment!(), + ext_unknown: vec![], + payload, + }), + SampleKind::Delete => ReplyBody::Del(Del { + timestamp, + ext_sinfo, + ext_attachment: ext_attachment!(), + ext_unknown: vec![], + }), + }, }), ext_qos: response::ext::QoSType::response_default(), ext_tstamp: None, diff --git 
a/zenoh/src/session.rs b/zenoh/src/session.rs index d52c446d3d..46cfd5e499 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -57,6 +57,9 @@ use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; +use zenoh_protocol::zenoh::reply::ReplyBody; +use zenoh_protocol::zenoh::Del; +use zenoh_protocol::zenoh::Put; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -73,10 +76,7 @@ use zenoh_protocol::{ Mapping, Push, Response, ResponseFinal, }, zenoh::{ - query::{ - self, - ext::{ConsolidationType, QueryBodyType}, - }, + query::{self, ext::QueryBodyType, Consolidation}, Pull, PushBody, RequestBody, ResponseBody, }, }; @@ -1808,9 +1808,9 @@ impl Session { ext_budget: None, ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { + consolidation: consolidation.into(), parameters: selector.parameters().to_string(), ext_sinfo: None, - ext_consolidation: consolidation.into(), ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, @@ -1851,7 +1851,7 @@ impl Session { parameters: &str, qid: RequestId, _target: TargetType, - _consolidation: ConsolidationType, + _consolidation: Consolidation, body: Option, #[cfg(feature = "unstable")] attachment: Option, ) { @@ -2233,7 +2233,7 @@ impl Primitives for Session { &m.parameters, msg.id, msg.ext_target, - m.ext_consolidation, + m.consolidation, m.ext_body, #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), @@ -2341,19 +2341,63 @@ impl Primitives for Session { } None => key_expr, }; - let info = DataInfo { - kind: SampleKind::Put, - encoding: Some(m.encoding), - timestamp: m.timestamp, - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), - source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), + + struct Ret { + payload: ZBuf, + info: DataInfo, + #[cfg(feature 
= "unstable")] + attachment: Option, + } + let Ret { + payload, + info, + #[cfg(feature = "unstable")] + attachment, + } = match m.payload { + ReplyBody::Put(Put { + timestamp, + encoding, + ext_sinfo, + ext_attachment: _attachment, + payload, + .. + }) => Ret { + payload, + info: DataInfo { + kind: SampleKind::Put, + encoding: Some(encoding), + timestamp, + source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), + }, + #[cfg(feature = "unstable")] + attachment: _attachment.map(Into::into), + }, + ReplyBody::Del(Del { + timestamp, + ext_sinfo, + ext_attachment: _attachment, + .. + }) => Ret { + payload: ZBuf::empty(), + info: DataInfo { + kind: SampleKind::Delete, + encoding: None, + timestamp, + source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), + }, + #[cfg(feature = "unstable")] + attachment: _attachment.map(Into::into), + }, }; + #[allow(unused_mut)] let mut sample = - Sample::with_info(key_expr.into_owned(), m.payload, Some(info)); + Sample::with_info(key_expr.into_owned(), payload, Some(info)); #[cfg(feature = "unstable")] { - sample.attachment = m.ext_attachment.map(Into::into); + sample.attachment = attachment; } let new_reply = Reply { sample: Ok(sample), diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index c2cec7c627..f727ad60c3 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -153,10 +153,31 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let c_msgs = msgs.clone(); let qbl = ztimeout!(peer01 .declare_queryable(key_expr) - .callback(move |sample| { + .callback(move |query| { c_msgs.fetch_add(1, Ordering::Relaxed); - let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + match query.parameters() { + "ok_put" => { + let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + rep.kind = 
SampleKind::Put; + task::block_on(async { + ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + }); + } + "ok_del" => { + let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + rep.kind = SampleKind::Delete; + task::block_on(async { + ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + }); + } + "err" => { + let rep = Value::from(vec![0u8; size]); + task::block_on(async { + ztimeout!(query.reply(Err(rep)).res_async()).unwrap() + }); + } + _ => panic!("Unknown query parameter"), + } }) .res_async()) .unwrap(); @@ -165,12 +186,15 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re task::sleep(SLEEP).await; // Get data - println!("[QR][02c] Getting on peer02 session. {msg_count} msgs."); + println!("[QR][02c] Getting Ok(Put) on peer02 session. {msg_count} msgs."); let mut cnt = 0; for _ in 0..msg_count { - let rs = ztimeout!(peer02.get(key_expr).res_async()).unwrap(); + let selector = format!("{}?ok_put", key_expr); + let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().value.payload.len(), size); + let s = s.sample.unwrap(); + assert_eq!(s.kind, SampleKind::Put); + assert_eq!(s.value.payload.len(), size); cnt += 1; } } @@ -178,6 +202,41 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re assert_eq!(msgs.load(Ordering::Relaxed), msg_count); assert_eq!(cnt, msg_count); + msgs.store(0, Ordering::Relaxed); + + println!("[QR][03c] Getting Ok(Delete) on peer02 session. {msg_count} msgs."); + let mut cnt = 0; + for _ in 0..msg_count { + let selector = format!("{}?ok_del", key_expr); + let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + while let Ok(s) = ztimeout!(rs.recv_async()) { + let s = s.sample.unwrap(); + assert_eq!(s.kind, SampleKind::Delete); + assert_eq!(s.value.payload.len(), 0); + cnt += 1; + } + } + println!("[QR][03c] Got on peer02 session. 
{cnt}/{msg_count} msgs."); + assert_eq!(msgs.load(Ordering::Relaxed), msg_count); + assert_eq!(cnt, msg_count); + + msgs.store(0, Ordering::Relaxed); + + println!("[QR][04c] Getting Err() on peer02 session. {msg_count} msgs."); + let mut cnt = 0; + for _ in 0..msg_count { + let selector = format!("{}?err", key_expr); + let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + while let Ok(s) = ztimeout!(rs.recv_async()) { + let e = s.sample.unwrap_err(); + assert_eq!(e.payload.len(), size); + cnt += 1; + } + } + println!("[QR][04c] Got on peer02 session. {cnt}/{msg_count} msgs."); + assert_eq!(msgs.load(Ordering::Relaxed), msg_count); + assert_eq!(cnt, msg_count); + println!("[PS][03c] Unqueryable on peer01 session"); ztimeout!(qbl.undeclare().res_async()).unwrap(); From d6ffebf080958157ac141c92b51b9fe00c075227 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 15 Feb 2024 11:58:21 +0100 Subject: [PATCH 002/598] Clean-up of protocol types (#729) * Update Reply protocol definition and codec * Make consolidation a flag in Query/Reply * Fix wrong Consolidation cast in codec * Apply Reply changes to routing * Fix shared-memory feature * Fix stats * Bump Zenoh Protocol Version * Add query/reply ok(put|del)/err() tests * Clean-up of code * Default CongestionControl for Push is Drop * Fix Priority::DEFAULT typo * Define DEFAULT consts * ConsolidationMode moved into the API * Remove unused Ack message * Fix Ack leftovers * CongestionControl::DEFAULT * QoSType::DEFAULT * Mapping::DEFAULT * Encoding::DEFAULT * QueryTarget::DEFAULT * NodeType::DEFAULT * QueryableInfo::DEFAULT * Remove ConsolidationMode from zenoh-protocol * ConsolidationType::DEFAULT * Remove dead code * Remove dead code * Move SampleKind to sample.rs * Cleanup SubMode * Cleanup QueryTarget * Remove emptyline --- commons/zenoh-codec/benches/codec.rs | 48 +++---- commons/zenoh-codec/src/common/mod.rs | 1 - commons/zenoh-codec/src/common/priority.rs | 66 --------- 
commons/zenoh-codec/src/core/mod.rs | 1 - commons/zenoh-codec/src/core/property.rs | 84 ----------- commons/zenoh-codec/src/core/wire_expr.rs | 2 +- commons/zenoh-codec/src/network/declare.rs | 32 ++--- commons/zenoh-codec/src/network/mod.rs | 2 +- commons/zenoh-codec/src/network/oam.rs | 7 +- commons/zenoh-codec/src/network/push.rs | 14 +- commons/zenoh-codec/src/network/request.rs | 20 +-- commons/zenoh-codec/src/network/response.rs | 15 +- commons/zenoh-codec/src/transport/fragment.rs | 6 +- commons/zenoh-codec/src/transport/frame.rs | 6 +- commons/zenoh-codec/src/transport/join.rs | 2 +- commons/zenoh-codec/src/transport/oam.rs | 6 +- commons/zenoh-codec/src/zenoh/ack.rs | 129 ----------------- commons/zenoh-codec/src/zenoh/mod.rs | 3 - commons/zenoh-codec/src/zenoh/put.rs | 6 +- commons/zenoh-codec/src/zenoh/query.rs | 6 +- commons/zenoh-codec/src/zenoh/reply.rs | 6 +- commons/zenoh-codec/tests/codec.rs | 5 - commons/zenoh-protocol/src/common/mod.rs | 15 -- commons/zenoh-protocol/src/core/encoding.rs | 2 + commons/zenoh-protocol/src/core/locator.rs | 64 --------- commons/zenoh-protocol/src/core/mod.rs | 98 ++----------- commons/zenoh-protocol/src/core/wire_expr.rs | 2 +- commons/zenoh-protocol/src/network/declare.rs | 28 +++- commons/zenoh-protocol/src/network/mod.rs | 44 +++--- commons/zenoh-protocol/src/network/request.rs | 13 +- commons/zenoh-protocol/src/transport/mod.rs | 12 +- commons/zenoh-protocol/src/zenoh/ack.rs | 84 ----------- commons/zenoh-protocol/src/zenoh/mod.rs | 19 +-- commons/zenoh-protocol/src/zenoh/query.rs | 14 +- examples/examples/z_pub_thr.rs | 2 +- io/zenoh-transport/src/common/batch.rs | 8 +- io/zenoh-transport/src/common/pipeline.rs | 16 +-- io/zenoh-transport/src/multicast/link.rs | 2 +- io/zenoh-transport/src/multicast/rx.rs | 4 +- io/zenoh-transport/src/shm.rs | 2 - .../src/unicast/establishment/cookie.rs | 1 - .../src/unicast/establishment/properties.rs | 132 ------------------ .../src/unicast/universal/rx.rs | 4 +- 
.../tests/multicast_compression.rs | 6 +- .../tests/multicast_transport.rs | 6 +- .../tests/unicast_compression.rs | 12 +- .../tests/unicast_concurrent.rs | 12 +- .../tests/unicast_defragmentation.rs | 16 +-- .../tests/unicast_intermittent.rs | 6 +- .../tests/unicast_priorities.rs | 4 +- io/zenoh-transport/tests/unicast_shm.rs | 12 +- .../tests/unicast_simultaneous.rs | 4 +- io/zenoh-transport/tests/unicast_transport.rs | 56 ++++---- zenoh-ext/src/subscriber_ext.rs | 8 +- zenoh/src/key_expr.rs | 4 +- zenoh/src/liveliness.rs | 6 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 4 +- zenoh/src/net/routing/dispatcher/queries.rs | 29 ++-- zenoh/src/net/routing/dispatcher/resource.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 16 +-- zenoh/src/net/routing/hat/client/queries.rs | 12 +- .../net/routing/hat/linkstate_peer/network.rs | 2 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 20 +-- .../net/routing/hat/linkstate_peer/queries.rs | 20 +-- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 16 +-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 12 +- zenoh/src/net/routing/hat/router/network.rs | 2 +- zenoh/src/net/routing/hat/router/pubsub.rs | 36 ++--- zenoh/src/net/routing/hat/router/queries.rs | 36 ++--- zenoh/src/net/runtime/adminspace.rs | 14 +- zenoh/src/net/tests/tables.rs | 40 +++--- zenoh/src/prelude.rs | 4 +- zenoh/src/publication.rs | 6 +- zenoh/src/query.rs | 33 ++++- zenoh/src/queryable.rs | 8 +- zenoh/src/sample.rs | 40 +++++- zenoh/src/session.rs | 77 +++++----- zenoh/src/subscriber.rs | 17 --- 79 files changed, 510 insertions(+), 1125 deletions(-) delete mode 100644 commons/zenoh-codec/src/common/priority.rs delete mode 100644 commons/zenoh-codec/src/core/property.rs delete mode 100644 commons/zenoh-codec/src/zenoh/ack.rs delete mode 100644 commons/zenoh-protocol/src/zenoh/ack.rs delete mode 100644 io/zenoh-transport/src/unicast/establishment/properties.rs diff --git 
a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index 1c46a700a7..34c9313a7f 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -75,19 +75,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -121,19 +121,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -162,19 +162,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + 
ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -210,12 +210,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -238,12 +238,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -277,12 +277,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/commons/zenoh-codec/src/common/mod.rs b/commons/zenoh-codec/src/common/mod.rs index 4c25c93241..f34f9872bf 100644 --- 
a/commons/zenoh-codec/src/common/mod.rs +++ b/commons/zenoh-codec/src/common/mod.rs @@ -12,4 +12,3 @@ // ZettaScale Zenoh Team, // pub mod extension; -mod priority; diff --git a/commons/zenoh-codec/src/common/priority.rs b/commons/zenoh-codec/src/common/priority.rs deleted file mode 100644 index 776229971e..0000000000 --- a/commons/zenoh-codec/src/common/priority.rs +++ /dev/null @@ -1,66 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; -use core::convert::TryInto; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::{common::imsg, core::Priority}; - -impl WCodec<&Priority, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Priority) -> Self::Output { - // Header - let header = imsg::id::PRIORITY | ((*x as u8) << imsg::HEADER_BITS); - self.write(&mut *writer, header)?; - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, _reader: &mut R) -> Result { - if imsg::mid(self.header) != imsg::id::PRIORITY { - return Err(DidntRead); - } - - let priority: Priority = (imsg::flags(self.header) >> imsg::HEADER_BITS) - .try_into() - .map_err(|_| DidntRead)?; - Ok(priority) - } -} diff --git 
a/commons/zenoh-codec/src/core/mod.rs b/commons/zenoh-codec/src/core/mod.rs index 1f48def695..c8e19f057f 100644 --- a/commons/zenoh-codec/src/core/mod.rs +++ b/commons/zenoh-codec/src/core/mod.rs @@ -13,7 +13,6 @@ // mod encoding; mod locator; -mod property; #[cfg(feature = "shared-memory")] mod shm; mod timestamp; diff --git a/commons/zenoh-codec/src/core/property.rs b/commons/zenoh-codec/src/core/property.rs deleted file mode 100644 index bb7f760208..0000000000 --- a/commons/zenoh-codec/src/core/property.rs +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{RCodec, WCodec, Zenoh080}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::core::Property; - -impl WCodec<&Property, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Property) -> Self::Output { - let Property { key, value } = x; - - self.write(&mut *writer, key)?; - self.write(&mut *writer, value.as_slice())?; - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let key: u64 = self.read(&mut *reader)?; - let value: Vec = self.read(&mut *reader)?; - - Ok(Property { key, value }) - } -} - -impl WCodec<&[Property], &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &[Property]) -> Self::Output { - self.write(&mut *writer, x.len())?; - for p in x.iter() { - 
self.write(&mut *writer, p)?; - } - - Ok(()) - } -} - -impl RCodec, &mut R> for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result, Self::Error> { - let num: usize = self.read(&mut *reader)?; - - let mut ps = Vec::with_capacity(num); - for _ in 0..num { - let p: Property = self.read(&mut *reader)?; - ps.push(p); - } - - Ok(ps) - } -} diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index 6caba6c8c7..aa6f77b379 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -65,7 +65,7 @@ where Ok(WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::default(), + mapping: Mapping::DEFAULT, }) } } diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 20916dc359..cf92b27c17 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -102,16 +102,16 @@ where // Header let mut header = id::DECLARE; - let mut n_exts = ((ext_qos != &declare::ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &declare::ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= declare::flag::Z; } self.write(&mut *writer, header)?; // Extensions - if ext_qos != &declare::ext::QoSType::default() { + if ext_qos != &declare::ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -119,7 +119,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &declare::ext::NodeIdType::default() { + if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -157,9 +157,9 @@ where } // Extensions - let mut ext_qos = declare::ext::QoSType::default(); + 
let mut ext_qos = declare::ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = declare::ext::NodeIdType::default(); + let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); while has_ext { @@ -340,11 +340,11 @@ where // Header let mut header = declare::id::D_SUBSCRIBER; - let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::default()) as u8; + let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -357,7 +357,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_info != &subscriber::ext::SubscriberInfo::default() { + if ext_info != &subscriber::ext::SubscriberInfo::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_info, n_exts != 0))?; } @@ -402,7 +402,7 @@ where }; // Extensions - let mut ext_info = subscriber::ext::SubscriberInfo::default(); + let mut ext_info = subscriber::ext::SubscriberInfo::DEFAULT; let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); while has_ext { @@ -524,11 +524,11 @@ where // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::default()) as u8; + let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -539,7 +539,7 @@ where // Body self.write(&mut *writer, id)?; self.write(&mut *writer, wire_expr)?; - if ext_info != &queryable::ext::QueryableInfo::default() { + if ext_info != &queryable::ext::QueryableInfo::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_info, n_exts != 0))?; } @@ -584,7 
+584,7 @@ where }; // Extensions - let mut ext_info = queryable::ext::QueryableInfo::default(); + let mut ext_info = queryable::ext::QueryableInfo::DEFAULT; let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); while has_ext { @@ -699,7 +699,7 @@ where // Header let mut header = declare::id::D_TOKEN; - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -851,7 +851,7 @@ where // Header let mut header = declare::id::D_INTEREST; - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index c1f2489b88..dade13d362 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -58,7 +58,7 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let codec = Zenoh080Reliability::new(Reliability::default()); + let codec = Zenoh080Reliability::new(Reliability::DEFAULT); codec.read(reader) } } diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index ff6daeb020..9751e9952d 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -52,8 +52,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = - ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -63,7 +62,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -115,7 +114,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = 
ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index 10a8489b29..b9ec2ba5db 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -44,13 +44,13 @@ where // Header let mut header = id::PUSH; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -62,7 +62,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -70,7 +70,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -116,9 +116,9 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::default(); + let mut ext_nodeid = ext::NodeIdType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 19711ff147..364c1af3d0 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -93,16 +93,16 @@ where // Header let mut header = id::REQUEST; - 
let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_target != &ext::TargetType::default()) as u8) + + ((ext_target != &ext::TargetType::DEFAULT) as u8) + (ext_budget.is_some() as u8) + (ext_timeout.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -115,7 +115,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +123,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_target != &ext::TargetType::default() { + if ext_target != &ext::TargetType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (ext_target, n_exts != 0))?; } @@ -137,7 +137,7 @@ where let e = ext::Timeout::new(to.as_millis() as u64); self.write(&mut *writer, (&e, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -185,10 +185,10 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::default(); - let mut ext_target = ext::TargetType::default(); + let mut ext_nodeid = ext::NodeIdType::DEFAULT; + let mut ext_target = ext::TargetType::DEFAULT; let mut ext_limit = None; let mut ext_timeout = None; diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index bec7df2967..5b69e8b109 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ 
b/commons/zenoh-codec/src/network/response.rs @@ -48,13 +48,13 @@ where // Header let mut header = id::RESPONSE; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + (ext_respid.is_some() as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -67,7 +67,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +123,7 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut ext_respid = None; @@ -183,8 +183,7 @@ where // Header let mut header = id::RESPONSE_FINAL; - let mut n_exts = - ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -194,7 +193,7 @@ where self.write(&mut *writer, rid)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -236,7 +235,7 @@ where let rid: RequestId = bodec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index b66f395df1..b01e2c2bae 100644 --- a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -48,7 +48,7 @@ where if *more { header |= flag::M; } - if ext_qos 
!= &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -57,7 +57,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { self.write(&mut *writer, (*ext_qos, false))?; } @@ -97,7 +97,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index 8d39aabcdb..ab82a024c4 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -46,7 +46,7 @@ where if let Reliability::Reliable = reliability { header |= flag::R; } - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -55,7 +55,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { self.write(&mut *writer, (x.ext_qos, false))?; } @@ -94,7 +94,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index 80c1663413..d87ceecc78 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -121,7 +121,7 @@ where let (_, more): (ZExtZBufHeader<{ ext::QoS::ID }>, bool) = self.read(&mut *reader)?; // Body - let mut ext_qos = Box::new([PrioritySn::default(); Priority::NUM]); + let mut ext_qos = Box::new([PrioritySn::DEFAULT; Priority::NUM]); for p in 
ext_qos.iter_mut() { *p = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index e2f905abf8..6861f638d3 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -47,7 +47,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = (ext_qos != &ext::QoSType::default()) as u8; + let mut n_exts = (ext_qos != &ext::QoSType::DEFAULT) as u8; if n_exts != 0 { header |= flag::Z; } @@ -57,7 +57,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -105,7 +105,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/zenoh/ack.rs b/commons/zenoh-codec/src/zenoh/ack.rs deleted file mode 100644 index 78cbca2987..0000000000 --- a/commons/zenoh-codec/src/zenoh/ack.rs +++ /dev/null @@ -1,129 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::{ - common::{iext, imsg}, - zenoh::{ - ack::{ext, flag, Ack}, - id, - }, -}; - -impl WCodec<&Ack, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Ack) -> Self::Output { - let Ack { - timestamp, - ext_sinfo, - ext_unknown, - } = x; - - // Header - let mut header = id::ACK; - if timestamp.is_some() { - header |= flag::T; - } - let mut n_exts = ((ext_sinfo.is_some()) as u8) + (ext_unknown.len() as u8); - if n_exts != 0 { - header |= flag::Z; - } - self.write(&mut *writer, header)?; - - // Body - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; - } - - // Extensions - if let Some(sinfo) = ext_sinfo.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (sinfo, n_exts != 0))?; - } - for u in ext_unknown.iter() { - n_exts -= 1; - self.write(&mut *writer, (u, n_exts != 0))?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != id::ACK { - return Err(DidntRead); - } - - // Body - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); - } - - // Extensions - let mut ext_sinfo: Option = None; - let mut ext_unknown = Vec::new(); - - let mut has_ext = imsg::has_flag(self.header, flag::Z); - while has_ext { - let ext: u8 = 
self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - ext::SourceInfo::ID => { - let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; - ext_sinfo = Some(s); - has_ext = ext; - } - _ => { - let (u, ext) = extension::read(reader, "Ack", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - } - } - - Ok(Ack { - timestamp, - ext_sinfo, - ext_unknown, - }) - } -} diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index d59add9d63..fdff09be94 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod ack; pub mod del; pub mod err; pub mod pull; @@ -121,7 +120,6 @@ where fn write(self, writer: &mut W, x: &ResponseBody) -> Self::Output { match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), - ResponseBody::Ack(b) => self.write(&mut *writer, b), ResponseBody::Err(b) => self.write(&mut *writer, b), ResponseBody::Put(b) => self.write(&mut *writer, b), } @@ -140,7 +138,6 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), - id::ACK => ResponseBody::Ack(codec.read(&mut *reader)?), id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index ebc364cf9b..4f50be4872 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -54,7 +54,7 @@ where if timestamp.is_some() { header |= flag::T; } - if encoding != &Encoding::default() { + if encoding != &Encoding::DEFAULT { header |= flag::E; } let mut n_exts = (ext_sinfo.is_some()) as u8 @@ -73,7 +73,7 @@ where if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if encoding != 
&Encoding::default() { + if encoding != &Encoding::DEFAULT { self.write(&mut *writer, encoding)?; } @@ -143,7 +143,7 @@ where timestamp = Some(self.codec.read(&mut *reader)?); } - let mut encoding = Encoding::default(); + let mut encoding = Encoding::DEFAULT; if imsg::has_flag(self.header, flag::E) { encoding = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index cb0506e474..55f25cd5ea 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -83,7 +83,7 @@ where // Header let mut header = id::QUERY; - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { header |= flag::C; } if !parameters.is_empty() { @@ -99,7 +99,7 @@ where self.write(&mut *writer, header)?; // Body - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { self.write(&mut *writer, *consolidation)?; } if !parameters.is_empty() { @@ -153,7 +153,7 @@ where } // Body - let mut consolidation = Consolidation::default(); + let mut consolidation = Consolidation::DEFAULT; if imsg::has_flag(self.header, flag::C) { consolidation = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index d54e98cc5e..308004a1c2 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -41,7 +41,7 @@ where // Header let mut header = id::REPLY; - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { header |= flag::C; } let mut n_exts = ext_unknown.len() as u8; @@ -51,7 +51,7 @@ where self.write(&mut *writer, header)?; // Body - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { self.write(&mut *writer, *consolidation)?; } @@ -93,7 +93,7 @@ where } // Body - let mut consolidation = Consolidation::default(); + let mut 
consolidation = Consolidation::DEFAULT; if imsg::has_flag(self.header, flag::C) { consolidation = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 28201c1977..7f23214b49 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -582,11 +582,6 @@ fn codec_err() { run!(zenoh::Err, zenoh::Err::rand()); } -#[test] -fn codec_ack() { - run!(zenoh::Ack, zenoh::Ack::rand()); -} - #[test] fn codec_pull() { run!(zenoh::Pull, zenoh::Pull::rand()); diff --git a/commons/zenoh-protocol/src/common/mod.rs b/commons/zenoh-protocol/src/common/mod.rs index d11d0b0c52..ef53e5a8ac 100644 --- a/commons/zenoh-protocol/src/common/mod.rs +++ b/commons/zenoh-protocol/src/common/mod.rs @@ -19,21 +19,6 @@ pub use extension::*; /*************************************/ // Inner Message IDs pub mod imsg { - pub mod id { - // Zenoh Messages - pub const DECLARE: u8 = 0x0b; - pub const DATA: u8 = 0x0c; - pub const QUERY: u8 = 0x0d; - pub const PULL: u8 = 0x0e; - pub const UNIT: u8 = 0x0f; - pub const LINK_STATE_LIST: u8 = 0x10; - - // Message decorators - pub const PRIORITY: u8 = 0x1c; - pub const ROUTING_CONTEXT: u8 = 0x1d; - pub const REPLY_CONTEXT: u8 = 0x1e; - } - // Header mask pub const HEADER_BITS: u8 = 5; pub const HEADER_MASK: u8 = !(0xff << HEADER_BITS); diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index f202b8e79c..b3abae8aae 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -266,6 +266,8 @@ impl Default for Encoding { } impl Encoding { + pub const DEFAULT: Self = Self::EMPTY; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::{ diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index cdd3dfa64c..42379f2b65 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ 
b/commons/zenoh-protocol/src/core/locator.rs @@ -122,67 +122,3 @@ impl Locator { EndPoint::rand().into() } } - -// pub(crate) trait HasCanonForm { -// fn is_canon(&self) -> bool; - -// type Output; -// fn canonicalize(self) -> Self::Output; -// } - -// fn cmp(this: &str, than: &str) -> core::cmp::Ordering { -// let is_longer = this.len().cmp(&than.len()); -// let this = this.chars(); -// let than = than.chars(); -// let zip = this.zip(than); -// for (this, than) in zip { -// match this.cmp(&than) { -// core::cmp::Ordering::Equal => {} -// o => return o, -// } -// } -// is_longer -// } - -// impl<'a, T: Iterator + Clone, V> HasCanonForm for T { -// fn is_canon(&self) -> bool { -// let mut iter = self.clone(); -// let mut acc = if let Some((key, _)) = iter.next() { -// key -// } else { -// return true; -// }; -// for (key, _) in iter { -// if cmp(key, acc) != core::cmp::Ordering::Greater { -// return false; -// } -// acc = key; -// } -// true -// } - -// type Output = Vec<(&'a str, V)>; -// fn canonicalize(mut self) -> Self::Output { -// let mut result = Vec::new(); -// if let Some(v) = self.next() { -// result.push(v); -// } -// 'outer: for (k, v) in self { -// for (i, (x, _)) in result.iter().enumerate() { -// match cmp(k, x) { -// core::cmp::Ordering::Less => { -// result.insert(i, (k, v)); -// continue 'outer; -// } -// core::cmp::Ordering::Equal => { -// result[i].1 = v; -// continue 'outer; -// } -// core::cmp::Ordering::Greater => {} -// } -// } -// result.push((k, v)) -// } -// result -// } -// } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 2547034c44..3e9315bec2 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -16,7 +16,6 @@ use alloc::{ boxed::Box, format, string::{String, ToString}, - vec::Vec, }; use core::{ convert::{From, TryFrom, TryInto}, @@ -54,43 +53,6 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; -#[derive(Debug, Clone, 
PartialEq, Eq)] -pub struct Property { - pub key: u64, - pub value: Vec, -} - -/// The kind of a `Sample`. -#[repr(u8)] -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -pub enum SampleKind { - /// if the `Sample` was issued by a `put` operation. - #[default] - Put = 0, - /// if the `Sample` was issued by a `delete` operation. - Delete = 1, -} - -impl fmt::Display for SampleKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SampleKind::Put => write!(f, "PUT"), - SampleKind::Delete => write!(f, "DELETE"), - } - } -} - -impl TryFrom for SampleKind { - type Error = u64; - fn try_from(kind: u64) -> Result { - match kind { - 0 => Ok(SampleKind::Put), - 1 => Ok(SampleKind::Delete), - _ => Err(kind), - } - } -} - /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] @@ -314,6 +276,8 @@ pub enum Priority { } impl Priority { + /// Default + pub const DEFAULT: Self = Self::Data; /// The lowest Priority pub const MIN: Self = Self::Background; /// The highest Priority @@ -354,6 +318,8 @@ pub enum Reliability { } impl Reliability { + pub const DEFAULT: Self = Self::BestEffort; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -374,6 +340,13 @@ pub struct Channel { pub reliability: Reliability, } +impl Channel { + pub const DEFAULT: Self = Self { + priority: Priority::DEFAULT, + reliability: Reliability::DEFAULT, + }; +} + /// The kind of congestion control. #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] #[repr(u8)] @@ -383,51 +356,6 @@ pub enum CongestionControl { Block = 1, } -/// The subscription mode. 
-#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -#[repr(u8)] -pub enum SubMode { - #[default] - Push = 0, - Pull = 1, -} - -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct SubInfo { - pub reliability: Reliability, - pub mode: SubMode, -} - -#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)] -pub struct QueryableInfo { - pub complete: u64, // Default 0: incomplete - pub distance: u64, // Default 0: no distance -} - -/// The kind of consolidation. -#[derive(Debug, Clone, PartialEq, Eq, Copy)] -pub enum ConsolidationMode { - /// No consolidation applied: multiple samples may be received for the same key-timestamp. - None, - /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp - /// has already been sent with the same key. - /// - /// This optimizes latency while potentially reducing bandwidth. - /// - /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already - /// been observed with the same key. - Monotonic, - /// Holds back samples to only send the set of samples that had the highest timestamp for their key. - Latest, -} - -/// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. 
-#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] -pub enum QueryTarget { - #[default] - BestMatching, - All, - AllComplete, - #[cfg(feature = "complete_n")] - Complete(u64), +impl CongestionControl { + pub const DEFAULT: Self = Self::Drop; } diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index 7b0dee7471..6d9623d6ca 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -257,7 +257,7 @@ impl WireExpr<'_> { WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::default(), + mapping: Mapping::DEFAULT, } } } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 76415d52f5..1568029cc6 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -156,6 +156,8 @@ pub enum Mode { } impl Mode { + pub const DEFAULT: Self = Self::Push; + #[cfg(feature = "test")] fn rand() -> Self { use rand::Rng; @@ -344,7 +346,7 @@ pub mod subscriber { /// - if P==1 then the subscription is pull, else it is push /// - rsv: Reserved /// ``` - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct SubscriberInfo { pub reliability: Reliability, pub mode: Mode, @@ -354,6 +356,11 @@ pub mod subscriber { pub const R: u64 = 1; pub const P: u64 = 1 << 1; + pub const DEFAULT: Self = Self { + reliability: Reliability::DEFAULT, + mode: Mode::DEFAULT, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { let reliability = Reliability::rand(); @@ -363,6 +370,12 @@ pub mod subscriber { } } + impl Default for SubscriberInfo { + fn default() -> Self { + Self::DEFAULT + } + } + impl From for SubscriberInfo { fn from(ext: Info) -> Self { let reliability = if imsg::has_option(ext.value, SubscriberInfo::R) { @@ -502,13 +515,18 @@ pub mod queryable { /// +---------------+ /// ~ distance ~ /// +---------------+ - 
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QueryableInfo { pub complete: u8, // Default 0: incomplete // @TODO: maybe a bitflag pub distance: u32, // Default 0: no distance } impl QueryableInfo { + pub const DEFAULT: Self = Self { + complete: 0, + distance: 0, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -520,6 +538,12 @@ pub mod queryable { } } + impl Default for QueryableInfo { + fn default() -> Self { + Self::DEFAULT + } + } + impl From for QueryableInfo { fn from(ext: Info) -> Self { let complete = ext.value as u8; diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 1be58db5cc..6807488873 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -51,6 +51,8 @@ pub enum Mapping { } impl Mapping { + pub const DEFAULT: Self = Self::Receiver; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -226,6 +228,16 @@ pub mod ext { const D_FLAG: u8 = 0b00001000; const E_FLAG: u8 = 0b00010000; + pub const DEFAULT: Self = Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false); + + pub const DECLARE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const PUSH: Self = Self::new(Priority::DEFAULT, CongestionControl::Drop, false); + pub const REQUEST: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const RESPONSE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const RESPONSE_FINAL: Self = + Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const OAM: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const fn new( priority: Priority, congestion_control: CongestionControl, @@ -275,35 +287,11 @@ pub mod ext { let inner: u8 = rng.gen(); Self { inner } } - - pub fn declare_default() -> Self { - Self::new(Priority::default(), 
CongestionControl::Block, false) - } - - pub fn push_default() -> Self { - Self::new(Priority::default(), CongestionControl::Drop, false) - } - - pub fn request_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn response_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn response_final_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn oam_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } } impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::default(), CongestionControl::default(), false) + Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false) } } @@ -371,6 +359,9 @@ pub mod ext { } impl NodeIdType<{ ID }> { + // node_id == 0 means the message has been generated by the node itself + pub const DEFAULT: Self = Self { node_id: 0 }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -382,8 +373,7 @@ pub mod ext { impl Default for NodeIdType<{ ID }> { fn default() -> Self { - // node_id == 0 means the message has been generated by the node itself - Self { node_id: 0 } + Self::DEFAULT } } diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index 9e0137ea3a..aba6bb057a 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -66,7 +66,6 @@ pub struct Request { pub mod ext { use crate::{ common::{ZExtZ64, ZExtZBuf}, - core::QueryTarget, zextz64, zextzbuf, }; use core::{num::NonZeroU32, time::Duration}; @@ -88,9 +87,19 @@ pub mod ext { /// +---------------+ /// /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. 
- pub type TargetType = QueryTarget; + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + pub enum TargetType { + #[default] + BestMatching, + All, + AllComplete, + #[cfg(feature = "complete_n")] + Complete(u64), + } impl TargetType { + pub const DEFAULT: Self = Self::BestMatching; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::*; diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index cdf994e5dd..307389f8c9 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -75,13 +75,18 @@ pub enum TransportBodyLowLatency { pub type TransportSn = u32; -#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct PrioritySn { pub reliable: TransportSn, pub best_effort: TransportSn, } impl PrioritySn { + pub const DEFAULT: Self = Self { + reliable: TransportSn::MIN, + best_effort: TransportSn::MIN, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -252,7 +257,8 @@ pub mod ext { } impl QoSType<{ ID }> { - pub const P_MASK: u8 = 0b00000111; + const P_MASK: u8 = 0b00000111; + pub const DEFAULT: Self = Self::new(Priority::DEFAULT); pub const fn new(priority: Priority) -> Self { Self { @@ -276,7 +282,7 @@ pub mod ext { impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::default()) + Self::DEFAULT } } diff --git a/commons/zenoh-protocol/src/zenoh/ack.rs b/commons/zenoh-protocol/src/zenoh/ack.rs deleted file mode 100644 index d40bf58791..0000000000 --- a/commons/zenoh-protocol/src/zenoh/ack.rs +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::common::ZExtUnknown; -use alloc::vec::Vec; -use uhlc::Timestamp; - -/// # Ack message -/// -/// ```text -/// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - X: Reserved -/// - Z: Extension If Z==1 then at least one extension is present -/// -/// 7 6 5 4 3 2 1 0 -/// +-+-+-+-+-+-+-+-+ -/// |Z|X|T| ACK | -/// +-+-+-+---------+ -/// ~ ts: ~ if T==1 -/// +---------------+ -/// ~ [err_exts] ~ if Z==1 -/// +---------------+ -/// ``` -pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Ack { - pub timestamp: Option, - pub ext_sinfo: Option, - pub ext_unknown: Vec, -} - -pub mod ext { - use crate::{common::ZExtZBuf, zextzbuf}; - - /// # SourceInfo extension - /// Used to carry additional information about the source of data - pub type SourceInfo = zextzbuf!(0x1, false); - pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; -} - -impl Ack { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; - use rand::Rng; - let mut rng = rand::thread_rng(); - - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); - let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let mut ext_unknown = Vec::new(); - for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::SourceInfo::ID) + 1, - false, - )); - } - - Self { - timestamp, - ext_sinfo, - ext_unknown, - } - } -} diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs 
b/commons/zenoh-protocol/src/zenoh/mod.rs index a23eaa9b21..d73d8cdd06 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod ack; pub mod del; pub mod err; pub mod pull; @@ -20,7 +19,6 @@ pub mod query; pub mod reply; use crate::core::Encoding; -pub use ack::Ack; pub use del::Del; pub use err::Err; pub use pull::Pull; @@ -35,8 +33,7 @@ pub mod id { pub const QUERY: u8 = 0x03; pub const REPLY: u8 = 0x04; pub const ERR: u8 = 0x05; - pub const ACK: u8 = 0x06; - pub const PULL: u8 = 0x07; + pub const PULL: u8 = 0x06; } // DataInfo @@ -127,7 +124,6 @@ impl From for RequestBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), - Ack(Ack), Err(Err), Put(Put), } @@ -138,11 +134,10 @@ impl ResponseBody { use rand::Rng; let mut rng = rand::thread_rng(); - match rng.gen_range(0..4) { + match rng.gen_range(0..3) { 0 => ResponseBody::Reply(Reply::rand()), - 1 => ResponseBody::Ack(Ack::rand()), - 2 => ResponseBody::Err(Err::rand()), - 3 => ResponseBody::Put(Put::rand()), + 1 => ResponseBody::Err(Err::rand()), + 2 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } } @@ -160,12 +155,6 @@ impl From for ResponseBody { } } -impl From for ResponseBody { - fn from(r: Ack) -> ResponseBody { - ResponseBody::Ack(r) - } -} - pub mod ext { use zenoh_buffers::ZBuf; diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index 17dfa23df8..ac53b963f5 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::ConsolidationMode}; +use crate::common::ZExtUnknown; use alloc::{string::String, vec::Vec}; /// The kind of consolidation. 
@@ -38,6 +38,8 @@ pub enum Consolidation { } impl Consolidation { + pub const DEFAULT: Self = Self::Auto; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::SliceRandom; @@ -55,16 +57,6 @@ impl Consolidation { } } -impl From for Consolidation { - fn from(val: ConsolidationMode) -> Self { - match val { - ConsolidationMode::None => Consolidation::None, - ConsolidationMode::Monotonic => Consolidation::Monotonic, - ConsolidationMode::Latest => Consolidation::Latest, - } - } -} - /// # Query message /// /// ```text diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 3e130e0608..b698cbc80b 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -23,7 +23,7 @@ fn main() { env_logger::init(); let args = Args::parse(); - let mut prio = Priority::default(); + let mut prio = Priority::DEFAULT; if let Some(p) = args.priority { prio = p.try_into().unwrap(); } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 4139a65a05..a6aad76f7b 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -574,12 +574,12 @@ mod tests { let tmsg: TransportMessage = KeepAlive.into(); let nmsg: NetworkMessage = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: ext::QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -601,7 +601,7 @@ mod tests { let mut frame = FrameHeader { reliability: Reliability::Reliable, sn: 0, - ext_qos: frame::ext::QoSType::default(), + ext_qos: frame::ext::QoSType::DEFAULT, }; // Serialize with a frame diff --git 
a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 954c656280..eebf23abc9 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -513,7 +513,7 @@ impl TransmissionPipeline { let mut stage_in = vec![]; let mut stage_out = vec![]; - let default_queue_size = [config.queue_size[Priority::default() as usize]]; + let default_queue_size = [config.queue_size[Priority::DEFAULT as usize]]; let size_iter = if priority.len() == 1 { default_queue_size.iter() } else { @@ -602,7 +602,7 @@ impl TransmissionPipelineProducer { let priority = msg.priority(); (priority as usize, priority) } else { - (0, Priority::default()) + (0, Priority::DEFAULT) }; // Lock the channel. We are the only one that will be writing on it. let mut queue = zlock!(self.stage_in[idx]); @@ -751,10 +751,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -881,10 +881,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -993,10 +993,10 @@ mod tests { false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git 
a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 21ed0b3fdf..b24c077c57 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -483,7 +483,7 @@ async fn tx_task( .collect::>(); let (next_sn, ext_qos) = if next_sns.len() == Priority::NUM { let tmp: [PrioritySn; Priority::NUM] = next_sns.try_into().unwrap(); - (PrioritySn::default(), Some(Box::new(tmp))) + (PrioritySn::DEFAULT, Some(Box::new(tmp))) } else { (next_sns[0], None) }; diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 14f2fd619c..dedef2149c 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -145,7 +145,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &peer.priority_rx[0] } else { bail!( @@ -181,7 +181,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &peer.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 8b0e93f494..6f98cafc14 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -167,7 +167,6 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { ResponseBody::Reply(b) => b.map_to_shminfo(), ResponseBody::Put(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), - ResponseBody::Ack(_) => Ok(false), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } @@ -222,7 +221,6 @@ pub fn map_zmsg_to_shmbuf( ResponseBody::Put(b) => b.map_to_shmbuf(shmr), ResponseBody::Err(b) => b.map_to_shmbuf(shmr), ResponseBody::Reply(b) 
=> b.map_to_shmbuf(shmr), - ResponseBody::Ack(_) => Ok(false), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index e9916be7e6..0db9e1c93a 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -// use super::properties::EstablishmentProperties; use crate::unicast::establishment::ext; use std::convert::TryFrom; use zenoh_buffers::{ diff --git a/io/zenoh-transport/src/unicast/establishment/properties.rs b/io/zenoh-transport/src/unicast/establishment/properties.rs deleted file mode 100644 index e259b650ab..0000000000 --- a/io/zenoh-transport/src/unicast/establishment/properties.rs +++ /dev/null @@ -1,132 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::{ - convert::TryFrom, - ops::{Deref, DerefMut}, -}; -use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::core::Property; -use zenoh_result::{bail, zerror, Error as ZError, ZResult}; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct EstablishmentProperties(Vec); - -impl Deref for EstablishmentProperties { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for EstablishmentProperties { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl EstablishmentProperties { - pub(super) fn new() -> Self { - EstablishmentProperties(vec![]) - } - - pub(super) fn insert(&mut self, p: Property) -> ZResult<()> { - if self.0.iter().any(|x| x.key == p.key) { - bail!("Property {} already exists", p.key) - } - self.0.push(p); - Ok(()) - } - - pub(super) fn remove(&mut self, key: u64) -> Option { - self.0 - .iter() - .position(|x| x.key == key) - .map(|i| self.0.remove(i)) - } -} - -impl TryFrom<&EstablishmentProperties> for Attachment { - type Error = ZError; - - fn try_from(eps: &EstablishmentProperties) -> Result { - if eps.is_empty() { - bail!("Can not create an attachment with zero properties") - } - - let mut zbuf = ZBuf::empty(); - let mut writer = zbuf.writer(); - let codec = Zenoh080::new(); - - codec - .write(&mut writer, eps.0.as_slice()) - .map_err(|_| zerror!(""))?; - - let attachment = Attachment::new(zbuf); - Ok(attachment) - } -} - -impl TryFrom> for EstablishmentProperties { - type Error = ZError; - - fn try_from(mut ps: Vec) -> Result { - let mut eps = EstablishmentProperties::new(); - for p in ps.drain(..) 
{ - eps.insert(p)?; - } - - Ok(eps) - } -} - -impl TryFrom<&Attachment> for EstablishmentProperties { - type Error = ZError; - - fn try_from(att: &Attachment) -> Result { - let mut reader = att.buffer.reader(); - let codec = Zenoh080::new(); - - let ps: Vec = codec.read(&mut reader).map_err(|_| zerror!(""))?; - EstablishmentProperties::try_from(ps) - } -} - -impl EstablishmentProperties { - #[cfg(test)] - pub fn rand() -> Self { - use rand::Rng; - - const MIN: usize = 1; - const MAX: usize = 8; - - let mut rng = rand::thread_rng(); - - let mut eps = EstablishmentProperties::new(); - for _ in MIN..=MAX { - loop { - let key: u64 = rng.gen(); - let mut value = vec![0u8; rng.gen_range(MIN..=MAX)]; - rng.fill(&mut value[..]); - let p = Property { key, value }; - if eps.insert(p).is_ok() { - break; - } - } - } - - eps - } -} diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 935a1814b0..04af432aef 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -81,7 +81,7 @@ impl TransportUnicastUniversal { let priority = ext_qos.priority(); let c = if self.is_qos() { &self.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &self.priority_rx[0] } else { bail!( @@ -124,7 +124,7 @@ impl TransportUnicastUniversal { let c = if self.is_qos() { &self.priority_rx[qos.priority() as usize] - } else if qos.priority() == Priority::default() { + } else if qos.priority() == Priority::DEFAULT { &self.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index f8e56a5484..4d1196e10f 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -269,11 +269,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, 
CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -363,7 +363,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index ebb290af1e..fe5a44b7ee 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -265,11 +265,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -359,7 +359,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 323c6f529e..dd4f55b5f5 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -297,11 +297,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: 
Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -442,7 +442,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -472,7 +472,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -505,7 +505,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -535,7 +535,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index d13f763b68..4e90432193 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -194,13 +194,13 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec, client_transport: TransportUn wire_expr: "test".into(), ext_qos: QoSType::new(*p, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; *ms].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index f9180849af..d12a9db7dc 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -271,13 +271,13 
@@ mod tests { let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, ext_shm: None, ext_attachment: None, @@ -319,13 +319,13 @@ mod tests { let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, ext_shm: None, ext_attachment: None, diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 19380eb49e..db73e99480 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -78,11 +78,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 11839aef2a..795ea90b41 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -468,11 +468,11 @@ async fn test_transport( wire_expr: 
"test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -614,7 +614,7 @@ fn transport_unicast_tcp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -644,7 +644,7 @@ fn transport_unicast_tcp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -677,7 +677,7 @@ fn transport_unicast_udp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -707,7 +707,7 @@ fn transport_unicast_udp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -739,7 +739,7 @@ fn transport_unicast_unix_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -773,7 +773,7 @@ fn transport_unicast_unix_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -808,11 +808,11 @@ fn transport_unicast_ws_only() { // Define the reliability and congestion 
control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -846,11 +846,11 @@ fn transport_unicast_ws_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -887,7 +887,7 @@ fn transport_unicast_unixpipe_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -921,7 +921,7 @@ fn transport_unicast_unixpipe_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -956,7 +956,7 @@ fn transport_unicast_tcp_udp() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -996,7 +996,7 @@ fn transport_unicast_tcp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1038,7 +1038,7 @@ fn transport_unicast_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1083,7 +1083,7 @@ fn transport_unicast_tcp_udp_unix() { // Define the reliability and congestion 
control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1130,11 +1130,11 @@ fn transport_unicast_tls_only_server() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1184,11 +1184,11 @@ fn transport_unicast_quic_only_server() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1256,11 +1256,11 @@ fn transport_unicast_tls_only_mutual_success() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1323,11 +1323,11 @@ fn transport_unicast_tls_only_mutual_no_client_certs_failure() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1403,11 +1403,11 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, 
reliability: Reliability::BestEffort, }, Channel { diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index a2987f8833..83de47779c 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -290,7 +290,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, origin: Locality::default(), fetch, handler: self.handler, @@ -334,11 +334,11 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, origin: Locality::default(), query_selector: None, - query_target: QueryTarget::default(), - query_consolidation: QueryConsolidation::default(), + query_target: QueryTarget::DEFAULT, + query_consolidation: QueryConsolidation::DEFAULT, query_accept_replies: ReplyKeyExpr::MatchingQuery, query_timeout: Duration::from_secs(10), handler: self.handler, diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index d2295f9798..36c696000a 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -633,9 +633,9 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr { id: expr_id }), }); diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 0883041bb7..26a803fa43 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -549,7 +549,7 @@ where 
&Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), Locality::default(), callback, - &SubscriberInfo::default(), + &SubscriberInfo::DEFAULT, ) .map(|sub_state| Subscriber { subscriber: SubscriberInner { @@ -747,8 +747,8 @@ where .query( &self.key_expr?.into(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - QueryTarget::default(), - QueryConsolidation::default(), + QueryTarget::DEFAULT, + QueryConsolidation::DEFAULT, Locality::default(), self.timeout, None, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index ffe2d3ccca..d6497a80b3 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -593,9 +593,9 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE for (key_expr, payload) in route { face.primitives.send_push(Push { wire_expr: key_expr, - ext_qos: ext::QoSType::push_default(), + ext_qos: ext::QoSType::PUSH, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload, }); } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index a6748650ab..e8e84395f8 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -494,7 +494,6 @@ macro_rules! 
inc_res_stats { e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), ); } - ResponseBody::Ack(_) => (), } } } @@ -555,14 +554,14 @@ pub fn route_query( for (wexpr, payload) in local_replies { let payload = ResponseBody::Reply(Reply { - consolidation: Consolidation::default(), // @TODO: handle Del case - ext_unknown: vec![], // @TODO: handle unknown extensions + consolidation: Consolidation::DEFAULT, // @TODO: handle Del case + ext_unknown: vec![], // @TODO: handle unknown extensions payload: ReplyBody::Put(Put { // @TODO: handle Del case - timestamp: None, // @TODO: handle timestamp - encoding: Encoding::default(), // @TODO: handle encoding - ext_sinfo: None, // @TODO: handle source info - ext_attachment: None, // @TODO: expose it in the API + timestamp: None, // @TODO: handle timestamp + encoding: Encoding::DEFAULT, // @TODO: handle encoding + ext_sinfo: None, // @TODO: handle source info + ext_attachment: None, // @TODO: expose it in the API #[cfg(feature = "shared-memory")] ext_shm: None, ext_unknown: vec![], // @TODO: handle unknown extensions @@ -583,7 +582,7 @@ pub fn route_query( rid: qid, wire_expr: wexpr, payload, - ext_qos: response::ext::QoSType::declare_default(), + ext_qos: response::ext::QoSType::DECLARE, ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid, @@ -605,7 +604,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, expr.full_expr().to_string(), @@ -636,7 +635,7 @@ pub fn route_query( Request { id: *qid, wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), + ext_qos: ext::QoSType::REQUEST, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: *t, @@ -672,7 +671,7 @@ pub fn route_query( Request { id: *qid, wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), + ext_qos: 
ext::QoSType::REQUEST, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: target, @@ -693,7 +692,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, expr.full_expr().to_string(), @@ -711,7 +710,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, "".to_string(), @@ -758,7 +757,7 @@ pub(crate) fn route_send_response( rid: query.src_qid, wire_expr: key_expr.to_owned(), payload: body, - ext_qos: response::ext::QoSType::response_default(), + ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, ext_respid, }, @@ -818,7 +817,7 @@ pub(crate) fn finalize_pending_query(query: Arc) { .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: query.src_qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, "".to_string(), diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 7fc71c623d..fb4dec4ad5 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -466,9 +466,9 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: nonwild_prefix.expr().into(), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 
7becff4b4d..6f71ef443a 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -49,9 +49,9 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -137,9 +137,9 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: res.expr().into(), @@ -171,9 +171,9 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -209,9 +209,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: 
DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 35a10557dc..667ff63c0e 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -96,9 +96,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -166,9 +166,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // TODO ext_wire_expr: WireExprType { wire_expr }, @@ -431,9 +431,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // TODO ext_wire_expr: WireExprType { wire_expr }, @@ -467,9 +467,9 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); 
face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // TODO wire_expr: key_expr, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 6281993c93..03a1e11e67 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -137,7 +137,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -177,9 +177,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -347,7 +347,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -373,9 +373,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index ae3fda51a7..cf4d201867 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -214,7 +214,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), + ext_qos: oam::ext::QoSType::OAM, ext_tstamp: None, }) .into()) diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 8f91335f0a..97677893aa 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -49,9 +49,9 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -137,9 +137,9 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: res.expr().into(), @@ -171,9 +171,9 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc let wire_expr = Resource::get_best_key(res, "", face.id); 
face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -209,9 +209,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 35a10557dc..667ff63c0e 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -96,9 +96,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -166,9 +166,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + 
ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -422,9 +422,9 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -587,9 +587,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -623,9 +623,9 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -650,9 +650,9 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: 
ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -790,9 +790,9 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber( UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) @@ -815,9 +815,9 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 90944a524f..dfffe42e0d 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -208,7 +208,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -258,9 +258,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: 
ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -488,7 +488,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -514,9 +514,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -785,9 +785,9 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -884,9 +884,9 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: 
ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable( UndeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) @@ -908,9 +908,9 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index f6fb13e76e..227dd035f4 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -270,9 +270,9 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: [&root_key, "/**"].concat().into(), @@ -284,13 +284,13 @@ impl AdminSpace { }); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: [&root_key, "/config/**"].concat().into(), - ext_info: SubscriberInfo::default(), + ext_info: SubscriberInfo::DEFAULT, }), }); } @@ -392,7 +392,7 @@ impl Primitives for AdminSpace { ); primitives.send_response_final(ResponseFinal { rid: msg.id, - ext_qos: ext::QoSType::response_final_default(), + ext_qos: 
ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }); return; @@ -405,7 +405,7 @@ impl Primitives for AdminSpace { log::error!("Unknown KeyExpr: {}", e); primitives.send_response_final(ResponseFinal { rid: msg.id, - ext_qos: ext::QoSType::response_final_default(), + ext_qos: ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }); return; diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 363803f682..57f6a6dcbc 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -497,9 +497,9 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 11, wire_expr: "test/client".into(), @@ -523,9 +523,9 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 12, wire_expr: WireExpr::from(11).with_suffix("/z1_pub1"), @@ -544,9 +544,9 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 21, wire_expr: "test/client".into(), @@ -570,9 +570,9 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 22, wire_expr: 
WireExpr::from(21).with_suffix("/z2_pub1"), @@ -591,9 +591,9 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 31, wire_expr: "test/client".into(), @@ -617,10 +617,10 @@ fn client_test() { &tables, &face0.upgrade().unwrap(), &"test/client/z1_wr1".into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -650,10 +650,10 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &WireExpr::from(11).with_suffix("/z1_wr2"), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -683,10 +683,10 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &"test/client/**".into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -716,10 +716,10 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &12.into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -749,10 +749,10 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &22.into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, 
#[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 36a841d1ef..ad28470f63 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -50,9 +50,7 @@ pub(crate) mod common { pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - pub use crate::sample::Sample; - - pub use zenoh_protocol::core::SampleKind; + pub use crate::sample::{Sample, SampleKind}; pub use crate::publication::Priority; #[zenoh_macros::unstable] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 843190ad45..58c7c5c367 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -811,7 +811,7 @@ fn resolve_put( false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: match kind { SampleKind::Put => { #[allow(unused_mut)] @@ -887,6 +887,8 @@ pub enum Priority { } impl Priority { + /// Default + pub const DEFAULT: Self = Self::Data; /// The lowest Priority pub const MIN: Self = Self::Background; /// The highest Priority @@ -1328,7 +1330,6 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() { use crate::{open, prelude::sync::*}; - use zenoh_protocol::core::SampleKind; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; @@ -1351,7 +1352,6 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { use crate::{open, prelude::sync::*}; - use zenoh_protocol::core::SampleKind; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index c4f3fb35e9..7a7a867cd8 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,7 +13,6 @@ // //! Query primitives. 
- use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; #[zenoh_macros::unstable] @@ -23,13 +22,38 @@ use std::collections::HashMap; use std::future::Ready; use std::time::Duration; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_protocol::zenoh::query::Consolidation; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). -pub use zenoh_protocol::core::QueryTarget; +pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. -pub use zenoh_protocol::core::ConsolidationMode; +#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum ConsolidationMode { + /// No consolidation applied: multiple samples may be received for the same key-timestamp. + None, + /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp + /// has already been sent with the same key. + /// + /// This optimizes latency while potentially reducing bandwidth. + /// + /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already + /// been observed with the same key. + Monotonic, + /// Holds back samples to only send the set of samples that had the highest timestamp for their key. + Latest, +} + +impl From for Consolidation { + fn from(val: ConsolidationMode) -> Self { + match val { + ConsolidationMode::None => Consolidation::None, + ConsolidationMode::Monotonic => Consolidation::Monotonic, + ConsolidationMode::Latest => Consolidation::Latest, + } + } +} /// The operation: either manual or automatic. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -45,6 +69,7 @@ pub struct QueryConsolidation { } impl QueryConsolidation { + pub const DEFAULT: Self = Self::AUTO; /// Automatic query consolidation strategy selection. 
pub const AUTO: Self = Self { mode: Mode::Auto }; @@ -72,7 +97,7 @@ impl From for QueryConsolidation { impl Default for QueryConsolidation { fn default() -> Self { - QueryConsolidation::AUTO + Self::DEFAULT } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 4e9f4914dd..d0ce99b512 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -56,7 +56,7 @@ impl Drop for QueryInner { fn drop(&mut self) { self.primitives.send_response_final(ResponseFinal { rid: self.qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }); } @@ -241,7 +241,7 @@ impl SyncResolve for ReplyBuilder<'_> { mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::default(), + consolidation: zenoh::Consolidation::DEFAULT, ext_unknown: vec![], payload: match kind { SampleKind::Put => ReplyBody::Put(Put { @@ -262,7 +262,7 @@ impl SyncResolve for ReplyBuilder<'_> { }), }, }), - ext_qos: response::ext::QoSType::response_default(), + ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, @@ -292,7 +292,7 @@ impl SyncResolve for ReplyBuilder<'_> { }), code: 0, // TODO }), - ext_qos: response::ext::QoSType::response_default(), + ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 5d707e5936..d41e8c83a1 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -14,13 +14,15 @@ //! 
Sample primitives use crate::buffers::ZBuf; -use crate::prelude::ZenohId; -use crate::prelude::{KeyExpr, SampleKind, Value}; +use crate::prelude::{KeyExpr, Value, ZenohId}; use crate::query::Reply; use crate::time::{new_reception_timestamp, Timestamp}; #[zenoh_macros::unstable] use serde::Serialize; -use std::convert::{TryFrom, TryInto}; +use std::{ + convert::{TryFrom, TryInto}, + fmt, +}; use zenoh_protocol::core::Encoding; pub type SourceSn = u64; @@ -311,6 +313,38 @@ mod attachment { } } } + +/// The kind of a `Sample`. +#[repr(u8)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] +pub enum SampleKind { + /// if the `Sample` was issued by a `put` operation. + #[default] + Put = 0, + /// if the `Sample` was issued by a `delete` operation. + Delete = 1, +} + +impl fmt::Display for SampleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SampleKind::Put => write!(f, "PUT"), + SampleKind::Delete => write!(f, "DELETE"), + } + } +} + +impl TryFrom for SampleKind { + type Error = u64; + fn try_from(kind: u64) -> Result { + match kind { + 0 => Ok(SampleKind::Put), + 1 => Ok(SampleKind::Delete), + _ => Err(kind), + } + } +} + #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 46cfd5e499..329e44e43f 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -296,7 +296,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { SubscriberBuilder { session: self.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, mode: PushMode, origin: Locality::default(), handler: DefaultHandler, @@ -329,8 +329,8 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { PublisherBuilder { session: self.clone(), key_expr: key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::default(), - 
priority: Priority::default(), + congestion_control: CongestionControl::DEFAULT, + priority: Priority::DEFAULT, destination: Locality::default(), } } @@ -775,8 +775,8 @@ impl Session { session: self, selector, scope: Ok(None), - target: QueryTarget::default(), - consolidation: QueryConsolidation::default(), + target: QueryTarget::DEFAULT, + consolidation: QueryConsolidation::DEFAULT, destination: Locality::default(), timeout, value: None, @@ -858,9 +858,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: WireExpr { @@ -1059,9 +1059,9 @@ impl Session { // }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: id as u32, wire_expr: key_expr.to_wire(self).to_owned(), @@ -1124,9 +1124,9 @@ impl Session { let wire_expr = WireExpr::from(join_sub).to_owned(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -1149,9 +1149,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: 
None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { @@ -1205,9 +1205,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: id as u32, wire_expr: key_expr.to_owned(), @@ -1233,9 +1233,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: id as u32, wire_expr: key_expr.to_owned(), @@ -1298,9 +1298,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: qable_state.key_expr.clone(), @@ -1317,9 +1317,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: qable_state.key_expr.clone(), @@ -1333,9 +1333,9 @@ impl Session { // There are no more Queryables on the same 
KeyExpr. drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) ext_wire_expr: WireExprType { @@ -1369,13 +1369,13 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: id as u32, wire_expr: key_expr.to_wire(self).to_owned(), - ext_info: SubscriberInfo::default(), + ext_info: SubscriberInfo::DEFAULT, }), }); Ok(tok_state) @@ -1393,9 +1393,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { @@ -1698,10 +1698,10 @@ impl Session { primitives.send_request(Request { id: 0, // @TODO compute a proper request ID wire_expr: key_expr.to_wire(self).to_owned(), - ext_qos: ext::QoSType::request_default(), + ext_qos: ext::QoSType::REQUEST, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - ext_target: request::ext::TargetType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, + ext_target: request::ext::TargetType::DEFAULT, ext_budget: None, ext_timeout: None, payload: RequestBody::Pull(Pull { @@ -1801,9 +1801,9 @@ impl Session { 
primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), - ext_qos: request::ext::QoSType::request_default(), + ext_qos: request::ext::QoSType::REQUEST, ext_tstamp: None, - ext_nodeid: request::ext::NodeIdType::default(), + ext_nodeid: request::ext::NodeIdType::DEFAULT, ext_target: target, ext_budget: None, ext_timeout: Some(timeout), @@ -1959,7 +1959,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { SubscriberBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, mode: PushMode, origin: Locality::default(), handler: DefaultHandler, @@ -2040,8 +2040,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { PublisherBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::default(), - priority: Priority::default(), + congestion_control: CongestionControl::DEFAULT, + priority: Priority::DEFAULT, destination: Locality::default(), } } @@ -2247,11 +2247,6 @@ impl Primitives for Session { fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Ack(_) => { - log::warn!( - "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." - ) - } ResponseBody::Put(_) => { log::warn!( "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 7258833d28..fe2236076f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -25,9 +25,6 @@ use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; -/// The subscription mode. -pub use zenoh_protocol::core::SubMode; - /// The kind of reliability. 
pub use zenoh_protocol::core::Reliability; @@ -117,7 +114,6 @@ impl<'a> PullSubscriberInner<'a> { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::subscriber::SubMode; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session @@ -252,12 +248,6 @@ impl Drop for SubscriberInner<'_> { #[derive(Debug, Clone, Copy)] pub struct PullMode; -impl From for SubMode { - fn from(_: PullMode) -> Self { - SubMode::Pull - } -} - impl From for Mode { fn from(_: PullMode) -> Self { Mode::Pull @@ -269,12 +259,6 @@ impl From for Mode { #[derive(Debug, Clone, Copy)] pub struct PushMode; -impl From for SubMode { - fn from(_: PushMode) -> Self { - SubMode::Push - } -} - impl From for Mode { fn from(_: PushMode) -> Self { Mode::Push @@ -712,7 +696,6 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::subscriber::SubMode; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session From cc8d4a1f93f358ef3a951e0ae0fe27c5b3e41171 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 26 Feb 2024 12:30:09 +0100 Subject: [PATCH 003/598] Allow DeclareInterest for any keyexpr (#739) * Allow to DeclareInterest for any keyexpr * Remove forgotten println --- commons/zenoh-codec/src/network/declare.rs | 41 +-- commons/zenoh-protocol/src/network/declare.rs | 283 ++++++++++++++---- zenoh/src/net/routing/mod.rs | 2 +- 3 files changed, 249 insertions(+), 77 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index cf92b27c17..6df25a8d2a 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -24,6 +24,7 @@ use zenoh_protocol::{ network::{ declare::{ self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, + Interest, }, id, 
Mapping, }, @@ -845,24 +846,20 @@ where fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { let interest::DeclareInterest { id, + interest: _, wire_expr, - interest, } = x; // Header - let mut header = declare::id::D_INTEREST; - if wire_expr.mapping != Mapping::DEFAULT { - header |= subscriber::flag::M; - } - if wire_expr.has_suffix() { - header |= subscriber::flag::N; - } + let header = declare::id::D_INTEREST | x.flags(); self.write(&mut *writer, header)?; // Body self.write(&mut *writer, id)?; - self.write(&mut *writer, wire_expr)?; - self.write(&mut *writer, interest.as_u8())?; + self.write(&mut *writer, x.options())?; + if let Some(we) = wire_expr.as_ref() { + self.write(&mut *writer, we)?; + } Ok(()) } @@ -894,14 +891,20 @@ where // Body let id: interest::InterestId = self.codec.read(&mut *reader)?; - let ccond = Zenoh080Condition::new(imsg::has_flag(self.header, token::flag::N)); - let mut wire_expr: WireExpr<'static> = ccond.read(&mut *reader)?; - wire_expr.mapping = if imsg::has_flag(self.header, token::flag::M) { - Mapping::Sender - } else { - Mapping::Receiver - }; - let interest: u8 = self.codec.read(&mut *reader)?; + let options: u8 = self.codec.read(&mut *reader)?; + let interest = Interest::from((imsg::flags(self.header), options)); + + let mut wire_expr = None; + if interest.restricted() { + let ccond = Zenoh080Condition::new(interest.named()); + let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; + we.mapping = if interest.mapping() { + Mapping::Sender + } else { + Mapping::Receiver + }; + wire_expr = Some(we); + } // Extensions let has_ext = imsg::has_flag(self.header, token::flag::Z); @@ -911,8 +914,8 @@ where Ok(interest::DeclareInterest { id, + interest, wire_expr, - interest: interest.into(), }) } } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 1568029cc6..8164d9440d 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ 
b/commons/zenoh-protocol/src/network/declare.rs @@ -18,7 +18,6 @@ use crate::{ zextz64, zextzbuf, }; use alloc::borrow::Cow; -use core::ops::BitOr; pub use interest::*; pub use keyexpr::*; pub use queryable::*; @@ -703,13 +702,18 @@ pub mod token { } pub mod interest { + use core::{ + fmt::{self, Debug}, + ops::{Add, AddAssign, Sub, SubAssign}, + }; + use super::*; pub type InterestId = u32; pub mod flag { - pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix - pub const M: u8 = 1 << 6; // 0x40 Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + pub const C: u8 = 1 << 5; // 0x20 Current if C==1 then the interest refers to the current declarations. + pub const F: u8 = 1 << 6; // 0x40 Future if F==1 then the interest refers to the future declarations. pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } @@ -753,21 +757,23 @@ pub mod interest { /// /// ```text /// Flags: - /// - N: Named If N==1 then the key expr has name/suffix - /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + /// - C: Current if C==1 then the interest refers to the current declarations. + /// - F: Future if F==1 then the interest refers to the future declarations. Note that if F==0 then: + /// - Declarations SHOULD NOT be sent after the FinalInterest; + /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. 
/// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|M|N| D_INT | + /// |Z|F|C| D_INT | /// +---------------+ /// ~ intst_id:z32 ~ /// +---------------+ - /// ~ key_scope:z16 ~ + /// |A|M|N|R|T|Q|S|K| (*) /// +---------------+ - /// ~ key_suffix ~ if N==1 -- + /// ~ key_scope:z16 ~ if R==1 /// +---------------+ - /// |A|F|C|X|T|Q|S|K| (*) + /// ~ key_suffix ~ if R==1 && N==1 -- /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ @@ -776,63 +782,141 @@ pub mod interest { /// - if S==1 then the interest refers to subscribers /// - if Q==1 then the interest refers to queryables /// - if T==1 then the interest refers to tokens - /// - if C==1 then the interest refers to the current declarations. - /// - if F==1 then the interest refers to the future declarations. Note that if F==0 then: - /// - replies SHOULD NOT be sent after the FinalInterest; - /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. + /// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. + /// - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0. + /// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. + /// If R==0 then M should be set to 0. 
/// - if A==1 then the replies SHOULD be aggregated /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareInterest { pub id: InterestId, - pub wire_expr: WireExpr<'static>, pub interest: Interest, + pub wire_expr: Option>, } - #[repr(transparent)] - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Interest(u8); + impl DeclareInterest { + pub fn flags(&self) -> u8 { + let mut interest = self.interest; + if self.interest.current() { + interest += Interest::CURRENT; + } + if self.interest.future() { + interest += Interest::FUTURE; + } + interest.flags + } + + pub fn options(&self) -> u8 { + let mut interest = self.interest; + if let Some(we) = self.wire_expr.as_ref() { + interest += Interest::RESTRICTED; + if we.has_suffix() { + interest += Interest::NAMED; + } + if let Mapping::Sender = we.mapping { + interest += Interest::MAPPING; + } + } + interest.options + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let id: InterestId = rng.gen(); + let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); + let interest = Interest::rand(); + + Self { + id, + wire_expr, + interest, + } + } + } + + #[derive(Clone, Copy)] + pub struct Interest { + flags: u8, + options: u8, + } impl Interest { - pub const KEYEXPRS: Interest = Interest(1); - pub const SUBSCRIBERS: Interest = Interest(1 << 1); - pub const QUERYABLES: Interest = Interest(1 << 2); - pub const TOKENS: Interest = Interest(1 << 3); - // pub const X: Interest = Interest(1 << 4); - pub const CURRENT: Interest = Interest(1 << 5); - pub const FUTURE: Interest = Interest(1 << 6); - pub const AGGREGATE: Interest = Interest(1 << 7); + // Header + pub const CURRENT: Interest = Interest::flags(interest::flag::C); + pub const FUTURE: Interest = Interest::flags(interest::flag::F); + // Flags + pub const KEYEXPRS: Interest = Interest::options(1); + pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); + pub const QUERYABLES: Interest = 
Interest::options(1 << 2); + pub const TOKENS: Interest = Interest::options(1 << 3); + const RESTRICTED: Interest = Interest::options(1 << 4); + const NAMED: Interest = Interest::options(1 << 5); + const MAPPING: Interest = Interest::options(1 << 6); + pub const AGGREGATE: Interest = Interest::options(1 << 7); + pub const ALL: Interest = Interest::options( + Interest::KEYEXPRS.options + | Interest::SUBSCRIBERS.options + | Interest::QUERYABLES.options + | Interest::TOKENS.options, + ); + + const fn flags(flags: u8) -> Self { + Self { flags, options: 0 } + } + + const fn options(options: u8) -> Self { + Self { flags: 0, options } + } + + pub const fn empty() -> Self { + Self { + flags: 0, + options: 0, + } + } + + pub const fn current(&self) -> bool { + imsg::has_flag(self.flags, Self::CURRENT.flags) + } + + pub const fn future(&self) -> bool { + imsg::has_flag(self.flags, Self::FUTURE.flags) + } pub const fn keyexprs(&self) -> bool { - imsg::has_flag(self.0, Self::KEYEXPRS.0) + imsg::has_flag(self.options, Self::KEYEXPRS.options) } pub const fn subscribers(&self) -> bool { - imsg::has_flag(self.0, Self::SUBSCRIBERS.0) + imsg::has_flag(self.options, Self::SUBSCRIBERS.options) } pub const fn queryables(&self) -> bool { - imsg::has_flag(self.0, Self::QUERYABLES.0) + imsg::has_flag(self.options, Self::QUERYABLES.options) } pub const fn tokens(&self) -> bool { - imsg::has_flag(self.0, Self::TOKENS.0) + imsg::has_flag(self.options, Self::TOKENS.options) } - pub const fn current(&self) -> bool { - imsg::has_flag(self.0, Self::CURRENT.0) + pub const fn restricted(&self) -> bool { + imsg::has_flag(self.options, Self::RESTRICTED.options) } - pub const fn future(&self) -> bool { - imsg::has_flag(self.0, Self::FUTURE.0) + pub const fn named(&self) -> bool { + imsg::has_flag(self.options, Self::NAMED.options) } - pub const fn aggregate(&self) -> bool { - imsg::has_flag(self.0, Self::AGGREGATE.0) + pub const fn mapping(&self) -> bool { + imsg::has_flag(self.options, 
Self::MAPPING.options) } - pub const fn as_u8(&self) -> u8 { - self.0 + pub const fn aggregate(&self) -> bool { + imsg::has_flag(self.options, Self::AGGREGATE.options) } #[cfg(feature = "test")] @@ -840,44 +924,129 @@ pub mod interest { use rand::Rng; let mut rng = rand::thread_rng(); - let inner: u8 = rng.gen(); + let mut s = Self::empty(); + if rng.gen_bool(0.5) { + s += Interest::CURRENT; + } + if rng.gen_bool(0.5) { + s += Interest::FUTURE; + } + if rng.gen_bool(0.5) { + s += Interest::KEYEXPRS; + } + if rng.gen_bool(0.5) { + s += Interest::SUBSCRIBERS; + } + if rng.gen_bool(0.5) { + s += Interest::TOKENS; + } + if rng.gen_bool(0.5) { + s += Interest::AGGREGATE; + } + s + } + } - Self(inner) + impl PartialEq for Interest { + fn eq(&self, other: &Self) -> bool { + self.current() == other.current() + && self.future() == other.future() + && self.keyexprs() == other.keyexprs() + && self.subscribers() == other.subscribers() + && self.queryables() == other.queryables() + && self.tokens() == other.tokens() + && self.aggregate() == other.aggregate() } } - impl BitOr for Interest { + impl Debug for Interest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Interest {{ ")?; + if self.current() { + write!(f, "C:Y, ")?; + } else { + write!(f, "C:N, ")?; + } + if self.future() { + write!(f, "F:Y, ")?; + } else { + write!(f, "F:N, ")?; + } + if self.keyexprs() { + write!(f, "K:Y, ")?; + } else { + write!(f, "K:N, ")?; + } + if self.subscribers() { + write!(f, "S:Y, ")?; + } else { + write!(f, "S:N, ")?; + } + if self.queryables() { + write!(f, "Q:Y, ")?; + } else { + write!(f, "Q:N, ")?; + } + if self.tokens() { + write!(f, "T:Y, ")?; + } else { + write!(f, "T:N, ")?; + } + if self.aggregate() { + write!(f, "A:Y")?; + } else { + write!(f, "A:N")?; + } + write!(f, " }}")?; + Ok(()) + } + } + + impl Eq for Interest {} + + impl Add for Interest { type Output = Self; - fn bitor(self, rhs: Self) -> Self::Output { - Self(self.0 | rhs.0) + fn add(self, 
rhs: Self) -> Self::Output { + Self { + flags: self.flags | rhs.flags, + options: self.options | rhs.options, + } } } - impl From for Interest { - fn from(v: u8) -> Self { - Self(v) + impl AddAssign for Interest { + fn add_assign(&mut self, rhs: Self) { + self.flags |= rhs.flags; + self.options |= rhs.options; } } - impl DeclareInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - let wire_expr = WireExpr::rand(); - let interest = Interest::rand(); + impl Sub for Interest { + type Output = Self; + fn sub(self, rhs: Self) -> Self::Output { Self { - id, - wire_expr, - interest, + flags: self.flags & !rhs.flags, + options: self.options & !rhs.options, } } } + impl SubAssign for Interest { + fn sub_assign(&mut self, rhs: Self) { + self.flags &= !rhs.flags; + self.options &= !rhs.options; + } + } + + impl From<(u8, u8)> for Interest { + fn from(value: (u8, u8)) -> Self { + let (flags, options) = value; + Self { flags, options } + } + } + /// ```text /// Flags: /// - X: Reserved diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 0b069c1337..8147cca31c 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -115,7 +115,7 @@ impl RoutingContext { DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), - DeclareBody::DeclareInterest(m) => Some(&m.wire_expr), + DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), DeclareBody::FinalInterest(_) => None, DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), }, From 24e5ef573f3454f7bfea2eb86467b28113ffc6dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 28 Feb 2024 10:31:45 +0100 Subject: [PATCH 004/598] ConsolidationMode can be Auto (#738) * ConsolidationMode rework * Fix QueryConsolidation::DEFAULT --- 
commons/zenoh-codec/src/zenoh/query.rs | 2 - commons/zenoh-protocol/src/zenoh/query.rs | 16 +++----- zenoh/src/query.rs | 45 ++++------------------- zenoh/src/session.rs | 10 ++--- 4 files changed, 18 insertions(+), 55 deletions(-) diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index 55f25cd5ea..efac7b5671 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -39,7 +39,6 @@ where Consolidation::None => 1, Consolidation::Monotonic => 2, Consolidation::Latest => 3, - Consolidation::Unique => 4, }; self.write(&mut *writer, v) } @@ -58,7 +57,6 @@ where 1 => Consolidation::None, 2 => Consolidation::Monotonic, 3 => Consolidation::Latest, - 4 => Consolidation::Unique, _ => Consolidation::Auto, // Fallback on Auto if Consolidation is unknown }; Ok(c) diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index ac53b963f5..f1baaebe20 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -33,8 +33,8 @@ pub enum Consolidation { Monotonic, /// Holds back samples to only send the set of samples that had the highest timestamp for their key. Latest, - /// Remove the duplicates of any samples based on the their timestamp. - Unique, + // Remove the duplicates of any samples based on the their timestamp. 
+ // Unique, } impl Consolidation { @@ -45,15 +45,9 @@ impl Consolidation { use rand::prelude::SliceRandom; let mut rng = rand::thread_rng(); - *[ - Self::None, - Self::Monotonic, - Self::Latest, - Self::Unique, - Self::Auto, - ] - .choose(&mut rng) - .unwrap() + *[Self::None, Self::Monotonic, Self::Latest, Self::Auto] + .choose(&mut rng) + .unwrap() } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 7a7a867cd8..a848913c7a 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -22,38 +22,13 @@ use std::collections::HashMap; use std::future::Ready; use std::time::Duration; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::zenoh::query::Consolidation; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. -#[derive(Debug, Clone, PartialEq, Eq, Copy)] -pub enum ConsolidationMode { - /// No consolidation applied: multiple samples may be received for the same key-timestamp. - None, - /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp - /// has already been sent with the same key. - /// - /// This optimizes latency while potentially reducing bandwidth. - /// - /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already - /// been observed with the same key. - Monotonic, - /// Holds back samples to only send the set of samples that had the highest timestamp for their key. 
- Latest, -} - -impl From for Consolidation { - fn from(val: ConsolidationMode) -> Self { - match val { - ConsolidationMode::None => Consolidation::None, - ConsolidationMode::Monotonic => Consolidation::Monotonic, - ConsolidationMode::Latest => Consolidation::Latest, - } - } -} +pub type ConsolidationMode = zenoh_protocol::zenoh::query::Consolidation; /// The operation: either manual or automatic. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -65,30 +40,26 @@ pub enum Mode { /// The replies consolidation strategy to apply on replies to a [`get`](Session::get). #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct QueryConsolidation { - pub(crate) mode: Mode, + pub(crate) mode: ConsolidationMode, } impl QueryConsolidation { pub const DEFAULT: Self = Self::AUTO; /// Automatic query consolidation strategy selection. - pub const AUTO: Self = Self { mode: Mode::Auto }; + pub const AUTO: Self = Self { + mode: ConsolidationMode::Auto, + }; pub(crate) const fn from_mode(mode: ConsolidationMode) -> Self { - Self { - mode: Mode::Manual(mode), - } + Self { mode } } /// Returns the requested [`ConsolidationMode`]. 
- pub fn mode(&self) -> Mode { + pub fn mode(&self) -> ConsolidationMode { self.mode } } -impl From> for QueryConsolidation { - fn from(mode: Mode) -> Self { - Self { mode } - } -} + impl From for QueryConsolidation { fn from(mode: ConsolidationMode) -> Self { Self::from_mode(mode) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 329e44e43f..efb7756ba4 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1728,14 +1728,14 @@ impl Session { log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { - Mode::Auto => { + ConsolidationMode::Auto => { if selector.decode().any(|(k, _)| k.as_ref() == TIME_RANGE_KEY) { ConsolidationMode::None } else { ConsolidationMode::Latest } } - Mode::Manual(mode) => mode, + mode => mode, }; let qid = state.qid_counter.fetch_add(1, Ordering::SeqCst); let nb_final = match destination { @@ -1808,7 +1808,7 @@ impl Session { ext_budget: None, ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { - consolidation: consolidation.into(), + consolidation, parameters: selector.parameters().to_string(), ext_sinfo: None, ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { @@ -1829,7 +1829,7 @@ impl Session { selector.parameters(), qid, target, - consolidation.into(), + consolidation, value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, @@ -2441,7 +2441,7 @@ impl Primitives for Session { } } } - ConsolidationMode::Latest => { + Consolidation::Auto | ConsolidationMode::Latest => { match query.replies.as_ref().unwrap().get( new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), ) { From e41f768b2b32d0893839807d0c2208e96d71709a Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 28 Feb 2024 12:14:02 +0100 Subject: [PATCH 005/598] Fix bug building reply --- zenoh/src/queryable.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) 
diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d0ce99b512..c802c29689 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -214,8 +214,6 @@ impl SyncResolve for ReplyBuilder<'_> { let mut ext_attachment = None; #[cfg(feature = "unstable")] { - data_info.source_id = source_info.source_id; - data_info.source_sn = source_info.source_sn; if let Some(attachment) = attachment { ext_attachment = Some(attachment.into()); } @@ -224,6 +222,11 @@ impl SyncResolve for ReplyBuilder<'_> { }}; } + #[cfg(feature = "unstable")] + { + data_info.source_id = source_info.source_id; + data_info.source_sn = source_info.source_sn; + } let ext_sinfo = if data_info.source_id.is_some() || data_info.source_sn.is_some() { Some(zenoh::put::ext::SourceInfoType { zid: data_info.source_id.unwrap_or_default(), From a8cdbbe802b0c307961c5e731d27cb5cf835e0f2 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Mar 2024 09:39:25 +0100 Subject: [PATCH 006/598] Revised Encoding API and wire format (#764) * Remove KnownEncoding enum and replace it with consts * Fix no_std * Encoding encoder (#746) * Encoding contains a mapping * Add forgotten file * Provide default encoder * Refine encoder * Fix encoding codec * Do not change the protocol representation * Accept Cow<'static, str> in EncodingMapping trait * Improve Value::Display * Fix doctests * Bump EncodingPrefix to u16. Add IANA encoding mapping. 
* Improve doc * Remove generic from Encoding::starts_with * Remove Display impl for Encoding * Improve doc * Improve doc * Improve encoding parsing * Improve comments * Improve doc * Encoding suffix bitflag * Encoder/Decoder traits take self * Rename encoding() to with_encoding() * Make Value, ZBuf, SingleOrVec empty() const * Derive Encoder for &mut u* and i* * Integers are encoded as le_bytes are not as string * Integers are encoded as le_bytes are not as string * Fix doctest * Refine default encoding mapping * IANA mapping starts from 1024 * Move IANA encoding to zneoh-ext * Improve docs * Improve DefaultEncoding * Add From for ZBuf * Remove Value and Sample Display trait impl * Encoder/Decoder operate on ZBuf * Payload type. Put takes Into. * Flat sample Value to Payload and Encoding fields * Add payload.rs * Polish up Publication * Add serde_cbor::Value as supported DefaultSerializer supported types * Add serde_pickle::Value as supported DefaultSerializer supported types * Add serde_yaml::Value as supported DefaultSerializer supported types * Impl TryFrom for Payload * Remove encoding folder * Polish up Value and Encoding * Fix doctest * Fix some erroneous prelude usage * Fix wrong typedef in publication * Encoding Id and Schema * Encoding Id and Schema * Fix encoding w_len * Wrapper type for Encoding * Add forgotten file * Expand Encoding consts and add doc * Polish doc * Polishing up Payload * Add EncodingMapping trait * Improve docs * Add deserialize in examples * Use deserialize in examples * Remove encoding from zenoh-ext * Add repr(transparent) to Payload * Improve encoding doc --- Cargo.lock | 140 ++- Cargo.toml | 7 +- commons/zenoh-buffers/src/lib.rs | 3 +- commons/zenoh-buffers/src/zbuf.rs | 9 +- commons/zenoh-buffers/src/zslice.rs | 31 +- commons/zenoh-codec/benches/codec.rs | 14 +- commons/zenoh-codec/src/core/encoding.rs | 44 +- commons/zenoh-codec/src/zenoh/put.rs | 6 +- .../zenoh-collections/src/single_or_vec.rs | 10 +- 
commons/zenoh-protocol/src/core/cowstr.rs | 2 +- commons/zenoh-protocol/src/core/encoding.rs | 292 +----- commons/zenoh-protocol/src/core/mod.rs | 4 +- commons/zenoh-protocol/src/zenoh/mod.rs | 4 +- examples/examples/z_get.rs | 25 +- examples/examples/z_get_liveliness.rs | 9 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- examples/examples/z_pull.rs | 6 +- examples/examples/z_queryable.rs | 5 +- examples/examples/z_storage.rs | 4 +- examples/examples/z_sub.rs | 5 +- io/zenoh-transport/src/common/batch.rs | 4 +- io/zenoh-transport/src/common/pipeline.rs | 6 +- io/zenoh-transport/src/multicast/link.rs | 2 +- io/zenoh-transport/src/unicast/link.rs | 2 +- .../src/unicast/lowlatency/link.rs | 4 +- .../tests/multicast_compression.rs | 2 +- .../tests/multicast_transport.rs | 2 +- .../tests/unicast_compression.rs | 2 +- .../tests/unicast_concurrent.rs | 4 +- .../tests/unicast_defragmentation.rs | 2 +- .../tests/unicast_intermittent.rs | 2 +- .../tests/unicast_priorities.rs | 2 +- io/zenoh-transport/tests/unicast_shm.rs | 4 +- .../tests/unicast_simultaneous.rs | 2 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 3 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- plugins/zenoh-plugin-rest/src/lib.rs | 71 +- .../src/replica/align_queryable.rs | 15 +- .../src/replica/aligner.rs | 29 +- .../src/replica/mod.rs | 5 +- .../src/replica/storage.rs | 43 +- .../tests/operations.rs | 7 +- .../tests/wildcard.rs | 11 +- zenoh-ext/Cargo.toml | 3 + zenoh-ext/examples/z_query_sub.rs | 8 +- zenoh-ext/src/group.rs | 4 +- zenoh-ext/src/lib.rs | 13 + zenoh-ext/src/querying_subscriber.rs | 48 +- zenoh-ext/src/subscriber_ext.rs | 12 +- zenoh/Cargo.toml | 4 + zenoh/src/admin.rs | 26 +- zenoh/src/encoding.rs | 850 ++++++++++++++++++ zenoh/src/lib.rs | 25 +- zenoh/src/liveliness.rs | 13 +- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 87 +- 
zenoh/src/net/tests/tables.rs | 10 +- zenoh/src/payload.rs | 673 ++++++++++++++ zenoh/src/prelude.rs | 20 +- zenoh/src/publication.rs | 110 ++- zenoh/src/queryable.rs | 19 +- zenoh/src/sample.rs | 115 +-- zenoh/src/session.rs | 53 +- zenoh/src/subscriber.rs | 18 +- zenoh/src/value.rs | 696 +------------- zenoh/tests/attachments.rs | 2 +- zenoh/tests/routing.rs | 8 +- zenoh/tests/session.rs | 6 +- zenoh/tests/unicity.rs | 6 +- 72 files changed, 2255 insertions(+), 1434 deletions(-) create mode 100644 zenoh/src/encoding.rs create mode 100644 zenoh/src/payload.rs diff --git a/Cargo.lock b/Cargo.lock index 1d5fab2365..53f2600071 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,7 +446,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -739,7 +739,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -1025,7 +1025,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -1337,7 +1337,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -1703,6 +1703,12 @@ dependencies = [ "nom", ] +[[package]] +name = "iter-read" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c397ca3ea05ad509c4ec451fea28b4771236a376ca1c69fd5143aae0cf8f93c4" + [[package]] name = "itertools" version = "0.10.5" @@ -2110,9 +2116,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ 
"autocfg", "libm", @@ -2267,7 +2273,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2291,6 +2297,48 @@ dependencies = [ "indexmap", ] +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.52", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -2308,7 +2356,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2451,9 +2499,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -2508,9 +2556,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" 
+version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -3005,22 +3053,45 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-pickle" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762ad136a26407c6a80825813600ceeab5e613660d93d79a41f0ec877171e71" +dependencies = [ + "byteorder", + "iter-read", + "num-bigint", + "num-traits", + "serde", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3045,9 +3116,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", 
@@ -3227,6 +3298,12 @@ dependencies = [ "event-listener 2.5.3", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -3456,9 +3533,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -3491,7 +3568,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3641,7 +3718,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3690,7 +3767,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3995,7 +4072,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "wasm-bindgen-shared", ] @@ -4029,7 +4106,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4325,11 +4402,15 @@ dependencies = [ "ordered-float", "paste", "petgraph", + "phf", "rand 0.8.5", "regex", "rustc_version 0.4.0", "serde", + "serde-pickle", + "serde_cbor", "serde_json", + "serde_yaml", "socket2 0.5.4", "stop-token", "uhlc", @@ -4467,7 +4548,10 @@ dependencies = [ "flume", "futures", "log", + "phf", "serde", + "serde_cbor", + "serde_json", "zenoh", "zenoh-core", "zenoh-macros", @@ 
-4695,7 +4779,7 @@ version = "0.11.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "zenoh-keyexpr", ] @@ -4939,7 +5023,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d7210ebc0e..9830b56490 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,11 +110,12 @@ libloading = "0.8" log = "0.4.17" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } -num_cpus = "1.15.0" +num_cpus = "1.16.0" ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" petgraph = "0.6.3" +phf = { version = "0.11.2", features = ["macros"] } pnet = "0.34" pnet_datalink = "0.34" proc-macro2 = "1.0.51" @@ -136,7 +137,9 @@ secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ "derive", ] } # Default features are disabled due to usage in no_std crates -serde_json = "1.0.94" +serde_cbor = "0.11.2" +serde_json = "1.0.114" +serde-pickle = "1.1.1" serde_yaml = "0.9.19" sha3 = "0.10.6" shared_memory = "0.12.4" diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 4dee599ea7..eae7f1715c 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -101,7 +101,8 @@ pub mod buffer { let mut slices = self.slices(); match slices.len() { 0 => Cow::Borrowed(b""), - 1 => Cow::Borrowed(slices.next().unwrap()), + // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. 
+ 1 => Cow::Borrowed(unsafe { slices.next().unwrap_unchecked() }), _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { acc.extend(it); acc diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 1365397966..fd86f454af 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -34,8 +34,10 @@ pub struct ZBuf { impl ZBuf { #[must_use] - pub fn empty() -> Self { - Self::default() + pub const fn empty() -> Self { + Self { + slices: SingleOrVec::empty(), + } } pub fn clear(&mut self) { @@ -72,6 +74,7 @@ impl ZBuf { } self.insert(start, replacement); } + fn remove(&mut self, mut start: usize, mut end: usize) { assert!(start <= end); assert!(end <= self.len()); @@ -100,6 +103,7 @@ impl ZBuf { let drain_end = end_slice_idx + (end_slice.start >= end_slice.end) as usize; self.slices.drain(drain_start..drain_end); } + fn insert(&mut self, mut at: usize, slice: &[u8]) { if slice.is_empty() { return; @@ -206,6 +210,7 @@ where zbuf } } + // Reader #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct ZBufPos { diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index e53e6f3334..c15cbc6828 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -92,24 +92,41 @@ pub struct ZSlice { } impl ZSlice { + #[deprecated(note = "use `new` instead")] pub fn make( buf: Arc, start: usize, end: usize, + ) -> Result> { + Self::new(buf, start, end) + } + + pub fn new( + buf: Arc, + start: usize, + end: usize, ) -> Result> { if start <= end && end <= buf.as_slice().len() { - Ok(ZSlice { - buf, - start, - end, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - }) + // unsafe: this operation is safe because we just checked the slice boundaries + Ok(unsafe { ZSlice::new_unchecked(buf, start, end) }) } else { Err(buf) } } + /// # Safety + /// This function does not verify wether the `start` and `end` indexes are within 
the buffer boundaries. + /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. + pub unsafe fn new_unchecked(buf: Arc, start: usize, end: usize) -> Self { + ZSlice { + buf, + start, + end, + #[cfg(feature = "shared-memory")] + kind: ZSliceKind::Raw, + } + } + #[inline] #[must_use] pub fn downcast_ref(&self) -> Option<&T> diff --git a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index 34c9313a7f..d897038f91 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -87,7 +87,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -133,7 +133,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -174,7 +174,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -215,7 +215,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -243,7 +243,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -282,7 +282,7 
@@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -305,7 +305,7 @@ fn criterion_benchmark(c: &mut Criterion) { let mut idx = 0; while idx < zslice.len() { let len = (zslice.len() - idx).min(chunk); - zbuf.push_zslice(ZSlice::make(buff.clone(), idx, idx + len).unwrap()); + zbuf.push_zslice(ZSlice::new(buff.clone(), idx, idx + len).unwrap()); idx += len; } diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index 478bcf1cd8..cfbe0084ba 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -12,16 +12,22 @@ // ZettaScale Zenoh Team, // use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; -use alloc::string::String; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::Encoding; +use zenoh_protocol::{ + common::imsg, + core::encoding::{flag, Encoding, EncodingId}, +}; impl LCodec<&Encoding> for Zenoh080 { fn w_len(self, x: &Encoding) -> usize { - 1 + self.w_len(x.suffix()) + let mut len = self.w_len((x.id as u32) << 1); + if let Some(schema) = x.schema.as_ref() { + len += self.w_len(schema.as_slice()); + } + len } } @@ -32,9 +38,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Encoding) -> Self::Output { - let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, *x.prefix() as u8)?; - zodec.write(&mut *writer, x.suffix())?; + let mut id = (x.id as u32) << 1; + + if x.schema.is_some() { + id |= flag::S; + } + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, id)?; + if let Some(schema) = x.schema.as_ref() { + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, schema)?; + } Ok(()) } } @@ -46,10 +60,20 @@ where type Error = 
DidntRead; fn read(self, reader: &mut R) -> Result { - let zodec = Zenoh080Bounded::::new(); - let prefix: u8 = zodec.read(&mut *reader)?; - let suffix: String = zodec.read(&mut *reader)?; - let encoding = Encoding::new(prefix, suffix).map_err(|_| DidntRead)?; + let zodec = Zenoh080Bounded::::new(); + let id: u32 = zodec.read(&mut *reader)?; + let (id, has_suffix) = ( + (id >> 1) as EncodingId, + imsg::has_flag(id as u8, flag::S as u8), + ); + + let mut schema = None; + if has_suffix { + let zodec = Zenoh080Bounded::::new(); + schema = Some(zodec.read(&mut *reader)?); + } + + let encoding = Encoding { id, schema }; Ok(encoding) } } diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index 4f50be4872..776b47245f 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -54,7 +54,7 @@ where if timestamp.is_some() { header |= flag::T; } - if encoding != &Encoding::DEFAULT { + if encoding != &Encoding::empty() { header |= flag::E; } let mut n_exts = (ext_sinfo.is_some()) as u8 @@ -73,7 +73,7 @@ where if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if encoding != &Encoding::DEFAULT { + if encoding != &Encoding::empty() { self.write(&mut *writer, encoding)?; } @@ -143,7 +143,7 @@ where timestamp = Some(self.codec.read(&mut *reader)?); } - let mut encoding = Encoding::DEFAULT; + let mut encoding = Encoding::empty(); if imsg::has_flag(self.header, flag::E) { encoding = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index c68ac6d8ff..ceb43e4025 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -30,6 +30,10 @@ enum SingleOrVecInner { } impl SingleOrVecInner { + const fn empty() -> Self { + SingleOrVecInner::Vec(Vec::new()) + } + fn push(&mut self, value: T) { match self { SingleOrVecInner::Vec(vec) if vec.capacity() 
== 0 => *self = Self::Single(value), @@ -53,7 +57,7 @@ where impl Default for SingleOrVecInner { fn default() -> Self { - SingleOrVecInner::Vec(Vec::new()) + Self::empty() } } @@ -88,6 +92,10 @@ where pub struct SingleOrVec(SingleOrVecInner); impl SingleOrVec { + pub const fn empty() -> Self { + Self(SingleOrVecInner::empty()) + } + pub fn push(&mut self, value: T) { self.0.push(value); } diff --git a/commons/zenoh-protocol/src/core/cowstr.rs b/commons/zenoh-protocol/src/core/cowstr.rs index 33dac4524f..209d020f40 100644 --- a/commons/zenoh-protocol/src/core/cowstr.rs +++ b/commons/zenoh-protocol/src/core/cowstr.rs @@ -21,7 +21,7 @@ enum CowStrInner<'a> { } pub struct CowStr<'a>(CowStrInner<'a>); impl<'a> CowStr<'a> { - pub(crate) fn borrowed(s: &'a str) -> Self { + pub(crate) const fn borrowed(s: &'a str) -> Self { Self(CowStrInner::Borrowed(s)) } pub fn as_str(&self) -> &str { diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index b3abae8aae..9b9aa5bf2f 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -11,282 +11,68 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::CowStr; -use alloc::{borrow::Cow, string::String}; -use core::{ - convert::TryFrom, - fmt::{self, Debug}, - mem, -}; -use zenoh_result::{bail, zerror, ZError, ZResult}; - -mod consts { - pub(super) const MIMES: [&str; 21] = [ - /* 0 */ "", - /* 1 */ "application/octet-stream", - /* 2 */ "application/custom", // non iana standard - /* 3 */ "text/plain", - /* 4 */ "application/properties", // non iana standard - /* 5 */ "application/json", // if not readable from casual users - /* 6 */ "application/sql", - /* 7 */ "application/integer", // non iana standard - /* 8 */ "application/float", // non iana standard - /* 9 */ - "application/xml", // if not readable from casual users (RFC 3023, sec 3) - /* 10 */ "application/xhtml+xml", - /* 11 */ "application/x-www-form-urlencoded", 
- /* 12 */ "text/json", // non iana standard - if readable from casual users - /* 13 */ "text/html", - /* 14 */ "text/xml", // if readable from casual users (RFC 3023, section 3) - /* 15 */ "text/css", - /* 16 */ "text/csv", - /* 17 */ "text/javascript", - /* 18 */ "image/jpeg", - /* 19 */ "image/png", - /* 20 */ "image/gif", - ]; -} - -#[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum KnownEncoding { - Empty = 0, - AppOctetStream = 1, - AppCustom = 2, - TextPlain = 3, - AppProperties = 4, - AppJson = 5, - AppSql = 6, - AppInteger = 7, - AppFloat = 8, - AppXml = 9, - AppXhtmlXml = 10, - AppXWwwFormUrlencoded = 11, - TextJson = 12, - TextHtml = 13, - TextXml = 14, - TextCss = 15, - TextCsv = 16, - TextJavascript = 17, - ImageJpeg = 18, - ImagePng = 19, - ImageGif = 20, -} - -impl From for u8 { - fn from(val: KnownEncoding) -> Self { - val as u8 - } -} - -impl From for &str { - fn from(val: KnownEncoding) -> Self { - consts::MIMES[u8::from(val) as usize] - } -} - -impl TryFrom for KnownEncoding { - type Error = ZError; - fn try_from(value: u8) -> Result { - if value < consts::MIMES.len() as u8 + 1 { - Ok(unsafe { mem::transmute(value) }) - } else { - Err(zerror!("Unknown encoding")) - } - } -} - -impl AsRef for KnownEncoding { - fn as_ref(&self) -> &str { - consts::MIMES[u8::from(*self) as usize] - } -} - -/// The encoding of a zenoh `zenoh::Value`. -/// -/// A zenoh encoding is a HTTP Mime type represented, for wire efficiency, -/// as an integer prefix (that maps to a string) and a string suffix. +use core::fmt::Debug; +use zenoh_buffers::ZSlice; + +pub type EncodingId = u16; + +/// [`Encoding`] is a metadata that indicates how the data payload should be interpreted. +/// For wire-efficiency and extensibility purposes, Zenoh defines an [`Encoding`] as +/// composed of an unsigned integer prefix and a string suffix. The actual meaning of the +/// prefix and suffix are out-of-scope of the protocol definition. 
Therefore, Zenoh does not +/// impose any encoding mapping and users are free to use any mapping they like. +/// Nevertheless, it is worth highlighting that Zenoh still provides a default mapping as part +/// of the API as per user convenience. That mapping has no impact on the Zenoh protocol definition. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum Encoding { - Exact(KnownEncoding), - WithSuffix(KnownEncoding, CowStr<'static>), +pub struct Encoding { + pub id: EncodingId, + pub schema: Option, } -impl Encoding { - pub fn new(prefix: u8, suffix: IntoCowStr) -> ZResult - where - IntoCowStr: Into> + AsRef, - { - let prefix = KnownEncoding::try_from(prefix)?; - let suffix = suffix.into(); - if suffix.as_bytes().len() > u8::MAX as usize { - bail!("Suffix length is limited to 255 characters") - } - if suffix.as_ref().is_empty() { - Ok(Encoding::Exact(prefix)) - } else { - Ok(Encoding::WithSuffix(prefix, suffix.into())) - } - } - - /// Sets the suffix of this encoding. - pub fn with_suffix(self, suffix: IntoCowStr) -> ZResult - where - IntoCowStr: Into> + AsRef, - { - match self { - Encoding::Exact(e) => Encoding::new(e as u8, suffix), - Encoding::WithSuffix(e, s) => Encoding::new(e as u8, s + suffix.as_ref()), - } - } - - pub fn as_ref<'a, T>(&'a self) -> T - where - &'a Self: Into, - { - self.into() - } - - /// Returns `true`if the string representation of this encoding starts with - /// the string representation of ther given encoding. 
- pub fn starts_with(&self, with: T) -> bool - where - T: Into, - { - let with: Encoding = with.into(); - self.prefix() == with.prefix() && self.suffix().starts_with(with.suffix()) - } - - pub const fn prefix(&self) -> &KnownEncoding { - match self { - Encoding::Exact(e) | Encoding::WithSuffix(e, _) => e, - } - } - - pub fn suffix(&self) -> &str { - match self { - Encoding::Exact(_) => "", - Encoding::WithSuffix(_, s) => s.as_ref(), - } - } +/// # Encoding field +/// +/// ```text +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// ~ id: z16 |S~ +/// +---------------+ +/// ~schema: ~ -- if S==1 +/// +---------------+ +/// ``` +pub mod flag { + pub const S: u32 = 1; // 0x01 Suffix if S==1 then suffix is present } impl Encoding { - pub const EMPTY: Encoding = Encoding::Exact(KnownEncoding::Empty); - pub const APP_OCTET_STREAM: Encoding = Encoding::Exact(KnownEncoding::AppOctetStream); - pub const APP_CUSTOM: Encoding = Encoding::Exact(KnownEncoding::AppCustom); - pub const TEXT_PLAIN: Encoding = Encoding::Exact(KnownEncoding::TextPlain); - pub const APP_PROPERTIES: Encoding = Encoding::Exact(KnownEncoding::AppProperties); - pub const APP_JSON: Encoding = Encoding::Exact(KnownEncoding::AppJson); - pub const APP_SQL: Encoding = Encoding::Exact(KnownEncoding::AppSql); - pub const APP_INTEGER: Encoding = Encoding::Exact(KnownEncoding::AppInteger); - pub const APP_FLOAT: Encoding = Encoding::Exact(KnownEncoding::AppFloat); - pub const APP_XML: Encoding = Encoding::Exact(KnownEncoding::AppXml); - pub const APP_XHTML_XML: Encoding = Encoding::Exact(KnownEncoding::AppXhtmlXml); - pub const APP_XWWW_FORM_URLENCODED: Encoding = - Encoding::Exact(KnownEncoding::AppXWwwFormUrlencoded); - pub const TEXT_JSON: Encoding = Encoding::Exact(KnownEncoding::TextJson); - pub const TEXT_HTML: Encoding = Encoding::Exact(KnownEncoding::TextHtml); - pub const TEXT_XML: Encoding = Encoding::Exact(KnownEncoding::TextXml); - pub const TEXT_CSS: Encoding = Encoding::Exact(KnownEncoding::TextCss); - 
pub const TEXT_CSV: Encoding = Encoding::Exact(KnownEncoding::TextCsv); - pub const TEXT_JAVASCRIPT: Encoding = Encoding::Exact(KnownEncoding::TextJavascript); - pub const IMAGE_JPEG: Encoding = Encoding::Exact(KnownEncoding::ImageJpeg); - pub const IMAGE_PNG: Encoding = Encoding::Exact(KnownEncoding::ImagePng); - pub const IMAGE_GIF: Encoding = Encoding::Exact(KnownEncoding::ImageGif); -} - -impl fmt::Display for Encoding { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Encoding::Exact(e) => f.write_str(e.as_ref()), - Encoding::WithSuffix(e, s) => { - f.write_str(e.as_ref())?; - f.write_str(s) - } - } - } -} - -impl From<&'static str> for Encoding { - fn from(s: &'static str) -> Self { - for (i, v) in consts::MIMES.iter().enumerate().skip(1) { - if let Some(suffix) = s.strip_prefix(v) { - if suffix.is_empty() { - return Encoding::Exact(unsafe { mem::transmute(i as u8) }); - } else { - return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, suffix.into()); - } - } - } - if s.is_empty() { - Encoding::Exact(KnownEncoding::Empty) - } else { - Encoding::WithSuffix(KnownEncoding::Empty, s.into()) - } - } -} - -impl From for Encoding { - fn from(mut s: String) -> Self { - for (i, v) in consts::MIMES.iter().enumerate().skip(1) { - if s.starts_with(v) { - s.replace_range(..v.len(), ""); - if s.is_empty() { - return Encoding::Exact(unsafe { mem::transmute(i as u8) }); - } else { - return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, s.into()); - } - } - } - if s.is_empty() { - Encoding::Exact(KnownEncoding::Empty) - } else { - Encoding::WithSuffix(KnownEncoding::Empty, s.into()) + /// Returns a new [`Encoding`] object with default empty prefix ID. 
+ pub const fn empty() -> Self { + Self { + id: 0, + schema: None, } } } -impl From<&KnownEncoding> for Encoding { - fn from(e: &KnownEncoding) -> Encoding { - Encoding::Exact(*e) - } -} - -impl From for Encoding { - fn from(e: KnownEncoding) -> Encoding { - Encoding::Exact(e) - } -} - impl Default for Encoding { fn default() -> Self { - KnownEncoding::Empty.into() + Self::empty() } } impl Encoding { - pub const DEFAULT: Self = Self::EMPTY; - #[cfg(feature = "test")] pub fn rand() -> Self { - use rand::{ - distributions::{Alphanumeric, DistString}, - Rng, - }; + use rand::Rng; const MIN: usize = 2; const MAX: usize = 16; let mut rng = rand::thread_rng(); - let prefix: u8 = rng.gen_range(0..20); - let suffix: String = if rng.gen_bool(0.5) { - let len = rng.gen_range(MIN..MAX); - Alphanumeric.sample_string(&mut rng, len) - } else { - String::new() - }; - Encoding::new(prefix, suffix).unwrap() + let id: EncodingId = rng.gen(); + let schema = rng + .gen_bool(0.5) + .then_some(ZSlice::rand(rng.gen_range(MIN..MAX))); + Encoding { id, schema } } } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 3e9315bec2..82658db2fd 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -41,8 +41,8 @@ pub use wire_expr::*; mod cowstr; pub use cowstr::CowStr; -mod encoding; -pub use encoding::{Encoding, KnownEncoding}; +pub mod encoding; +pub use encoding::{Encoding, EncodingId}; pub mod locator; pub use locator::*; diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index d73d8cdd06..4c8458885b 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -209,12 +209,14 @@ pub mod ext { } } + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ encoding ~ /// +---------------+ - /// ~ pl: [u8;z32] ~ -- Payload + /// ~ pl: ~ -- Payload /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] 
pub struct ValueType { #[cfg(feature = "shared-memory")] diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 57c36c2e62..0fff95c250 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; @@ -40,12 +39,24 @@ async fn main() { .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { - Ok(sample) => println!( - ">> Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value, - ), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + Ok(sample) => { + let payload = sample + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!( + ">> Received ('{}': '{}')", + sample.key_expr.as_str(), + payload, + ); + } + Err(err) => { + let payload = err + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } } } } diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index e0aaf8cd23..036dc0ab98 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; @@ -39,7 +38,13 @@ async fn main() { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr.as_str(),), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + Err(err) => { + let payload = err + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } } } } diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 
fe5ed4d46b..cb6fecd81a 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -38,7 +38,7 @@ fn main() { .res() .unwrap(); - let data: Value = (0usize..size) + let data: Payload = (0usize..size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index f057075434..1f06c7abb9 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -41,7 +41,7 @@ fn main() { let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.value).res().unwrap()) + .callback(move |sample| publisher.put(sample.payload).res().unwrap()) .res() .unwrap(); for _ in stdin().bytes().take_while(|b| !matches!(b, Ok(b'q'))) {} diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index b698cbc80b..7a3e90f627 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -30,7 +30,7 @@ fn main() { let payload_size = args.payload_size; - let data: Value = (0..payload_size) + let data: Payload = (0..payload_size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 812c47294e..ed2a90f1a6 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -44,11 +44,15 @@ async fn main() { // Define the future to handle incoming samples of the subscription. 
let subs = async { while let Ok(sample) = subscriber.recv_async().await { + let payload = sample + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), - sample.value, + payload, ); } }; diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 54b9858cf0..d7376835b7 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -49,7 +49,10 @@ async fn main() { let query = query.unwrap(); match query.value() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), - Some(value) => println!(">> [Queryable ] Received Query '{}' with value '{}'", query.selector(), value), + Some(value) => { + let payload = value.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Queryable ] Received Query '{}' with value '{}'", query.selector(), payload); + }, } let reply = if send_errors.swap(false, Relaxed) { println!( diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 79164c914a..5e0eaabd44 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -53,8 +53,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - println!(">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, sample.key_expr.as_str(), sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(),payload); if sample.kind == SampleKind::Delete { stored.remove(&sample.key_expr.to_string()); } else { diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 0542f85870..195e2f7640 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -46,10 +46,9 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = 
sample.unwrap(); - println!(">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, sample.key_expr.as_str(), sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); }, - _ = stdin.read_exact(&mut input).fuse() => { match input[0] { b'q' => break, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index a6aad76f7b..e923a7e1af 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -460,7 +460,7 @@ impl RBatch { let mut into = (buff)(); let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) .map_err(|_| zerror!("Decompression error"))?; - let zslice = ZSlice::make(Arc::new(into), 0, n) + let zslice = ZSlice::new(Arc::new(into), 0, n) .map_err(|_| zerror!("Invalid decompression buffer length"))?; Ok(zslice) } @@ -579,7 +579,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index eebf23abc9..3968eabdf5 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -754,7 +754,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -884,7 +884,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -996,7 +996,7 @@ mod tests { ext_nodeid: 
ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index b24c077c57..0172902935 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -216,7 +216,7 @@ impl TransportLinkMulticastRx { let mut into = (buff)(); let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; - let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; + let buffer = ZSlice::new(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; Ok((batch, locator.into_owned())) diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index bd756d6396..daa6c3e5a5 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -232,7 +232,7 @@ impl TransportLinkUnicastRx { // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); - let buffer = ZSlice::make(Arc::new(into), 0, end) + let buffer = ZSlice::new(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. 
ZSlice index(es) out of bounds"))?; let mut batch = RBatch::new(self.batch, buffer); batch diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 6a382f5960..3c290ac89e 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -239,7 +239,7 @@ async fn rx_task_stream( transport.stats.inc_rx_bytes(2 + bytes); // Account for the batch len encoding (16 bits) // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); + let zslice = ZSlice::new(Arc::new(buffer), 0, bytes).unwrap(); transport.read_messages(zslice, &link.link).await?; } } @@ -274,7 +274,7 @@ async fn rx_task_dgram( transport.stats.inc_rx_bytes(bytes); // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); + let zslice = ZSlice::new(Arc::new(buffer), 0, bytes).unwrap(); transport.read_messages(zslice, &link.link).await?; } } diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index 4d1196e10f..5301b967f6 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -273,7 +273,7 @@ mod tests { payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index fe5a44b7ee..69c1decd83 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -269,7 +269,7 @@ mod tests { payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = 
"shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index dd4f55b5f5..a9c10e1a9e 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -301,7 +301,7 @@ mod tests { payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 4e90432193..b14cebaaf9 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -200,7 +200,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec, client_transport: TransportUn payload: Put { payload: vec![0u8; *ms].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index d12a9db7dc..5ec7e31aba 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -277,7 +277,7 @@ mod tests { payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, ext_shm: None, ext_attachment: None, @@ -325,7 +325,7 @@ mod tests { payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, ext_shm: None, ext_attachment: None, diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index db73e99480..d465497556 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -82,7 +82,7 @@ mod 
tests { payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 795ea90b41..2a830a9e2b 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -472,7 +472,7 @@ async fn test_transport( payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index c2f083827d..592a08ca9b 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -164,7 +164,8 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - info!("Received data ('{}': '{}')", sample.key_expr, sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + info!("Received data ('{}': '{}')", sample.key_expr, payload); stored.insert(sample.key_expr.to_string(), sample); }, // on query received by the Queryable diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 0c6eb4357b..c5bdcc4c73 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -75,11 +75,7 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { - publisher - .put(Value::from(value).encoding(KnownEncoding::TextPlain.into())) - .res() - .await - .unwrap(); + publisher.put(value).res().await.unwrap(); 
async_std::task::sleep(Duration::from_secs(1)).await; } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6f4e80f4eb..1a99d7b5a4 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -21,6 +21,7 @@ use async_std::prelude::FutureExt; use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; use futures::StreamExt; use http_types::Method; +use std::borrow::Cow; use std::convert::TryFrom; use std::str::FromStr; use std::sync::Arc; @@ -29,7 +30,6 @@ use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; -use zenoh::properties::Properties; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::TIME_RANGE_KEY; @@ -46,38 +46,18 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; -fn value_to_json(value: Value) -> String { - // @TODO: transcode to JSON when implemented in Value - match &value.encoding { - p if p.starts_with(KnownEncoding::TextPlain) - || p.starts_with(KnownEncoding::AppXWwwFormUrlencoded) => - { - // convert to Json string for special characters escaping - serde_json::json!(value.to_string()).to_string() - } - p if p.starts_with(KnownEncoding::AppProperties) => { - // convert to Json string for special characters escaping - serde_json::json!(*Properties::from(value.to_string())).to_string() - } - p if p.starts_with(KnownEncoding::AppJson) - || p.starts_with(KnownEncoding::AppInteger) - || p.starts_with(KnownEncoding::AppFloat) => - { - value.to_string() - } - _ => { - format!(r#""{}""#, b64_std_engine.encode(value.payload.contiguous())) - } - } +fn payload_to_json(payload: Payload) -> String { + payload + .deserialize::() + .unwrap_or_else(|_| format!(r#""{}""#, b64_std_engine.encode(payload.contiguous()))) } fn sample_to_json(sample: Sample) -> String { - let encoding = 
sample.value.encoding.to_string(); format!( r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, sample.key_expr.as_str(), - value_to_json(sample.value), - encoding, + payload_to_json(sample.payload), + sample.encoding, if let Some(ts) = sample.timestamp { ts.to_string() } else { @@ -90,11 +70,10 @@ fn result_to_json(sample: Result) -> String { match sample { Ok(sample) => sample_to_json(sample), Err(err) => { - let encoding = err.encoding.to_string(); format!( r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - value_to_json(err), - encoding, + payload_to_json(err.payload), + err.encoding, ) } } @@ -157,12 +136,12 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(reply) => match reply.sample { Ok(sample) => response( StatusCode::Ok, - sample.value.encoding.to_string().as_ref(), + Cow::from(&sample.encoding).as_ref(), String::from_utf8_lossy(&sample.payload.contiguous()).as_ref(), ), Err(value) => response( StatusCode::Ok, - value.encoding.to_string().as_ref(), + Cow::from(&value.encoding).as_ref(), String::from_utf8_lossy(&value.payload.contiguous()).as_ref(), ), }, @@ -404,9 +383,9 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { @@ -441,21 +420,25 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result { + session + .put(&key_expr, bytes) + .with_encoding(encoding) + .res() + .await + } + SampleKind::Delete => session.delete(&key_expr).res().await, + }; + match res { Ok(_) => Ok(Response::new(StatusCode::Ok)), Err(e) => Ok(response( StatusCode::InternalServerError, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 7295367a06..359b8dd7e8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,6 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; 
use std::str; use std::str::FromStr; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -115,7 +116,12 @@ impl AlignQueryable { query.reply(Ok(sample)).res().await.unwrap(); } AlignData::Data(k, (v, ts)) => { - let sample = Sample::new(k, v).with_timestamp(ts); + let Value { + payload, encoding, .. + } = v; + let sample = Sample::new(k, payload) + .with_encoding(encoding) + .with_timestamp(ts); query.reply(Ok(sample)).res().await.unwrap(); } } @@ -165,7 +171,10 @@ impl AlignQueryable { let entry = entry.unwrap(); result.push(AlignData::Data( OwnedKeyExpr::from(entry.key_expr), - (entry.value, each.timestamp), + ( + Value::new(entry.payload).with_encoding(entry.encoding), + each.timestamp, + ), )); } } @@ -221,7 +230,7 @@ impl AlignQueryable { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", sample.key_expr.as_str(), - sample.value + StringOrBase64::from(sample.payload.clone()) ); if let Some(timestamp) = sample.timestamp { match timestamp.cmp(&logentry.timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 041567ae27..03c6fa949a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -19,6 +19,7 @@ use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -104,7 +105,12 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let sample = Sample::new(key, value).with_timestamp(ts); + let Value { + payload, encoding, .. 
+ } = value; + let sample = Sample::new(key, payload) + .with_encoding(encoding) + .with_timestamp(ts); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -136,7 +142,10 @@ impl Aligner { for sample in replies { result.insert( sample.key_expr.into(), - (sample.timestamp.unwrap(), sample.value), + ( + sample.timestamp.unwrap(), + Value::new(sample.payload).with_encoding(sample.encoding), + ), ); } (result, no_err) @@ -202,9 +211,9 @@ impl Aligner { let properties = format!("timestamp={}&{}=cold", other.timestamp, ERA); let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_intervals: HashMap = HashMap::new(); - // expecting sample.value to be a vec of intervals with their checksum + // expecting sample.payload to be a vec of intervals with their checksum for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_str(&StringOrBase64::from(each.payload)) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -246,11 +255,11 @@ impl Aligner { INTERVALS, diff_string.join(",") ); - // expecting sample.value to be a vec of subintervals with their checksum + // expecting sample.payload to be a vec of subintervals with their checksum let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_str(&StringOrBase64::from(each.payload)) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -287,11 +296,11 @@ impl Aligner { SUBINTERVALS, diff_string.join(",") ); - // expecting sample.value to be a vec of log entries with their checksum + // expecting sample.payload to be a vec of log entries with their checksum let (reply_content, mut no_err) = self.perform_query(other_rep, 
properties).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_str(&StringOrBase64::from(each.payload)) { Ok((i, c)) => { other_content.insert(i, c); } @@ -332,13 +341,13 @@ impl Aligner { log::trace!( "[ALIGNER] Received ('{}': '{}')", sample.key_expr.as_str(), - sample.value + StringOrBase64::from(sample.payload.clone()) ); return_val.push(sample); } Err(err) => { log::error!( - "[ALIGNER] Received error for query on selector {} :{}", + "[ALIGNER] Received error for query on selector {} :{:?}", selector, err ); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index b743a70451..78254213f7 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,6 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -226,9 +227,9 @@ impl Replica { from, sample.kind, sample.key_expr.as_str(), - sample.value + StringOrBase64::from(sample.payload.clone()) ); - let digest: Digest = match serde_json::from_str(&format!("{}", sample.value)) { + let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload)) { Ok(digest) => digest, Err(e) => { log::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 115ed1e8d9..1ef7e65390 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -180,7 +180,7 @@ impl StorageService { // log error if the sample is not timestamped // This is to reduce down the line 
inconsistencies of having duplicate samples stored if sample.get_timestamp().is_none() { - log::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); + log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -262,7 +262,7 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - log::trace!("[STORAGE] Processing sample: {}", sample); + log::trace!("[STORAGE] Processing sample: {:?}", sample); // Call incoming data interceptor (if any) let sample = if let Some(ref interceptor) = self.in_interceptor { interceptor(sample) @@ -295,7 +295,7 @@ impl StorageService { && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) { log::trace!( - "Sample `{}` identified as neded processing for key {}", + "Sample `{:?}` identified as neded processing for key {}", sample, k ); @@ -306,15 +306,19 @@ impl StorageService { .await { Some(overriding_update) => { - let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), overriding_update.data.value) - .with_timestamp(overriding_update.data.timestamp); + let Value { + payload, encoding, .. 
+ } = overriding_update.data.value; + let mut sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) + .with_encoding(encoding) + .with_timestamp(overriding_update.data.timestamp); sample_to_store.kind = overriding_update.kind; sample_to_store } None => { let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.value.clone()) + Sample::new(KeyExpr::from(k.clone()), sample.payload.clone()) + .with_encoding(sample.encoding.clone()) .with_timestamp(sample.timestamp.unwrap()); sample_to_store.kind = sample.kind; sample_to_store @@ -333,7 +337,8 @@ impl StorageService { storage .put( stripped_key, - sample_to_store.value.clone(), + Value::new(sample_to_store.payload.clone()) + .with_encoding(sample_to_store.encoding.clone()), sample_to_store.timestamp.unwrap(), ) .await @@ -397,7 +402,7 @@ impl StorageService { Update { kind: sample.kind, data: StoredData { - value: sample.value, + value: Value::new(sample.payload).with_encoding(sample.encoding), timestamp: sample.timestamp.unwrap(), }, }, @@ -515,7 +520,11 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = Sample::new(key.clone(), entry.value) + let Value { + payload, encoding, .. + } = entry.value; + let sample = Sample::new(key.clone(), payload) + .with_encoding(encoding) .with_timestamp(entry.timestamp); // apply outgoing interceptor on results let sample = if let Some(ref interceptor) = self.out_interceptor { @@ -549,7 +558,11 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = Sample::new(q.key_expr().clone(), entry.value) + let Value { + payload, encoding, .. 
+ } = entry.value; + let sample = Sample::new(q.key_expr().clone(), payload) + .with_encoding(encoding) .with_timestamp(entry.timestamp); // apply outgoing interceptor on results let sample = if let Some(ref interceptor) = self.out_interceptor { @@ -667,7 +680,7 @@ impl StorageService { self.process_sample(sample).await; } Err(e) => log::warn!( - "Storage '{}' received an error to align query: {}", + "Storage '{}' received an error to align query: {:?}", self.name, e ), @@ -688,15 +701,15 @@ fn serialize_update(update: &Update) -> String { } fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() let mut payload = ZBuf::default(); for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).encoding(Encoding::from(result.2)); + let value = Value::new(payload).with_encoding(result.2); let data = StoredData { value, - timestamp: Timestamp::from_str(&result.1).unwrap(), + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() }; let kind = if result.0.eq(&(SampleKind::Put).to_string()) { SampleKind::Put diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index a4293f31f1..81029e2fa7 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,6 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; @@ -100,7 +101,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/a").await; assert_eq!(data.len(), 1); - 
assert_eq!(format!("{}", data[0].value), "1"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "1"); put_data( &session, @@ -116,7 +117,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); delete_data( &session, @@ -135,7 +136,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); assert_eq!(data[0].key_expr.as_str(), "operation/test/b"); drop(storage); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 60970b2247..4808ec246f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,6 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; @@ -117,7 +118,7 @@ async fn test_wild_card_in_order() { let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); assert_eq!(data[0].key_expr.as_str(), "wild/test/a"); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); put_data( &session, @@ -135,8 +136,8 @@ async fn test_wild_card_in_order() { assert_eq!(data.len(), 2); assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert!(["2", "3"].contains(&format!("{}", data[0].value).as_str())); - 
assert!(["2", "3"].contains(&format!("{}", data[1].value).as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload.clone()).as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload.clone()).as_str())); put_data( &session, @@ -154,8 +155,8 @@ async fn test_wild_card_in_order() { assert_eq!(data.len(), 2); assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert_eq!(format!("{}", data[0].value).as_str(), "4"); - assert_eq!(format!("{}", data[1].value).as_str(), "4"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "4"); + assert_eq!(StringOrBase64::from(data[1].payload.clone()).as_str(), "4"); delete_data( &session, diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 91b0283ddb..7ee6e7213c 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -37,7 +37,10 @@ env_logger = { workspace = true } flume = { workspace = true } futures = { workspace = true } log = { workspace = true } +phf = { workspace = true } serde = { workspace = true, features = ["default"] } +serde_cbor = { workspace = true } +serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } zenoh-core = { workspace = true } zenoh-macros = { workspace = true } diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 73433ebf14..80efc0854f 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -17,9 +17,7 @@ use clap::Command; use futures::prelude::*; use futures::select; use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; -use zenoh::query::ReplyKeyExpr; +use zenoh::{config::Config, prelude::r#async::*, query::ReplyKeyExpr}; use zenoh_ext::*; #[async_std::main] @@ -62,8 +60,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = 
sample.unwrap(); - println!(">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, sample.key_expr.as_str(), sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); }, _ = stdin.read_exact(&mut input).fuse() => { diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index aece581fde..9078e61741 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -252,7 +252,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.value.payload.contiguous())) { + match bincode::deserialize::(&(s.payload.contiguous())) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -342,7 +342,7 @@ async fn net_event_handler(z: Arc, state: Arc) { } } Err(e) => { - log::warn!("Error received: {}", e); + log::warn!("Error received: {:?}", e); } } } diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7440d80a53..7ac880fd8c 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -23,6 +23,9 @@ pub use querying_subscriber::{ pub use session_ext::SessionExt; pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; +use zenoh::query::Reply; +use zenoh::{sample::Sample, Result as ZResult}; +use zenoh_core::zerror; /// The space of keys to use in a [`FetchingSubscriber`]. 
pub enum KeySpace { @@ -51,3 +54,13 @@ impl From for KeySpace { KeySpace::Liveliness } } + +pub trait ExtractSample { + fn extract(self) -> ZResult; +} + +impl ExtractSample for Reply { + fn extract(self) -> ZResult { + self.sample.map_err(|e| zerror!("{:?}", e).into()) + } +} diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4a7c4f2ded..2c89ec82ae 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -26,6 +26,8 @@ use zenoh::Result as ZResult; use zenoh::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; +use crate::ExtractSample; + /// The builder of [`FetchingSubscriber`], allowing to configure it. #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> { @@ -350,8 +352,7 @@ pub struct FetchingSubscriberBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { pub(crate) session: SessionRef<'a>, pub(crate) key_expr: ZResult>, @@ -372,8 +373,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fn with_static_keys( self, @@ -399,8 +399,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { /// Add callback to [`FetchingSubscriber`]. #[inline] @@ -496,8 +495,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, crate::UserSpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { /// Change the subscription reliability. 
#[inline] @@ -540,8 +538,7 @@ impl< where Handler: IntoCallbackReceiverPair<'static, Sample>, Handler::Receiver: Send, - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type To = ZResult>; } @@ -556,8 +553,7 @@ where KeySpace: Into, Handler: IntoCallbackReceiverPair<'static, Sample> + Send, Handler::Receiver: Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { fn res_sync(self) -> ::To { FetchingSubscriber::new(self.with_static_keys()) @@ -575,8 +571,7 @@ where KeySpace: Into, Handler: IntoCallbackReceiverPair<'static, Sample> + Send, Handler::Receiver: Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { type Future = Ready; @@ -649,8 +644,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { where KeySpace: Into, Handler: IntoCallbackReceiverPair<'static, Sample, Receiver = Receiver> + Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, @@ -769,8 +763,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { fetch: Fetch, ) -> impl Resolve> where - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { FetchBuilder { fetch, @@ -846,8 +839,7 @@ pub struct FetchBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fetch: Fetch, phantom: std::marker::PhantomData, @@ -858,8 +850,7 @@ pub struct FetchBuilder< impl) -> ZResult<()>, TryIntoSample> Resolvable for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type To = ZResult<()>; } @@ -867,8 +858,7 @@ where impl) -> ZResult<()>, TryIntoSample> SyncResolve for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fn 
res_sync(self) -> ::To { let handler = register_handler(self.state, self.callback); @@ -879,8 +869,7 @@ where impl) -> ZResult<()>, TryIntoSample> AsyncResolve for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type Future = Ready; @@ -906,16 +895,15 @@ fn run_fetch< handler: RepliesHandler, ) -> ZResult<()> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { log::debug!("Fetch data for FetchingSubscriber"); - (fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { + (fetch)(Box::new(move |s: TryIntoSample| match s.extract() { Ok(s) => { let mut state = zlock!(handler.state); log::trace!("Fetched sample received: push it to merge_queue"); state.merge_queue.push(s); } - Err(e) => log::debug!("Received error fetching data: {}", e.into()), + Err(e) => log::debug!("Received error fetching data: {}", e), })) } diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 83de47779c..89d3b5f691 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -13,7 +13,7 @@ // use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; -use std::{convert::TryInto, time::Duration}; +use std::time::Duration; use zenoh::query::ReplyKeyExpr; use zenoh::sample::Locality; use zenoh::Result as ZResult; @@ -24,6 +24,7 @@ use zenoh::{ subscriber::{PushMode, Reliability, Subscriber, SubscriberBuilder}, }; +use crate::ExtractSample; use crate::{querying_subscriber::QueryingSubscriberBuilder, FetchingSubscriberBuilder}; /// Allows writing `subscriber.forward(receiver)` instead of `subscriber.stream().map(Ok).forward(publisher)` @@ -87,8 +88,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into; + TryIntoSample: ExtractSample; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber) that will 
perform a query (`session.get()`) as it's /// initial fetch. @@ -169,8 +169,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { FetchingSubscriberBuilder { session: self.session, @@ -283,8 +282,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { FetchingSubscriberBuilder { session: self.session, diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 11ecfad1bf..e6f7a4d9aa 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -79,10 +79,14 @@ log = { workspace = true } ordered-float = { workspace = true } paste = { workspace = true } petgraph = { workspace = true } +phf = { workspace = true } rand = { workspace = true, features = ["default"] } regex = { workspace = true } serde = { workspace = true, features = ["default"] } +serde_cbor = { workspace = true } serde_json = { workspace = true } +serde-pickle = { workspace = true } +serde_yaml = { workspace = true } socket2 = { workspace = true } stop-token = { workspace = true } uhlc = { workspace = true, features = ["default"] } diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 8cdf638af5..5a242d51b7 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,11 +12,12 @@ // ZettaScale Zenoh Team, // use crate::{ + encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Sample, Session, ZResult, + Payload, Sample, Session, ZResult, }; use async_std::task; use std::{ @@ -25,10 +26,7 @@ use std::{ sync::Arc, }; use zenoh_core::SyncResolve; -use zenoh_protocol::{ - core::{Encoding, KnownEncoding, WireExpr}, - network::NetworkMessage, -}; +use 
zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; @@ -71,7 +69,12 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { - let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync(); + match Payload::try_from(value) { + Ok(zbuf) => { + let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + } + Err(e) => log::debug!("Admin query error: {}", e), + } } } @@ -83,7 +86,12 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(link) { - let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync(); + match Payload::try_from(value) { + Ok(zbuf) => { + let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + } + Err(e) => log::debug!("Admin query error: {}", e), + } } } } @@ -145,7 +153,7 @@ impl TransportMulticastEventHandler for Handler { let expr = WireExpr::from(&(*KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid)) .to_owned(); let info = DataInfo { - encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), + encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; self.session.handle_data( @@ -191,7 +199,7 @@ impl TransportPeerEventHandler for PeerHandler { let mut s = DefaultHasher::new(); link.hash(&mut s); let info = DataInfo { - encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), + encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; self.session.handle_data( diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs new file mode 100644 index 0000000000..d9fa725ed5 --- /dev/null +++ b/zenoh/src/encoding.rs @@ -0,0 +1,850 
@@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::payload::Payload; +use phf::phf_map; +use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; +use zenoh_buffers::{ZBuf, ZSlice}; +use zenoh_protocol::core::EncodingId; +#[cfg(feature = "shared-memory")] +use ::{std::sync::Arc, zenoh_shm::SharedMemoryBuf}; + +/// Default encoding values used by Zenoh. +/// +/// An encoding has a similar role to Content-type in HTTP: it indicates, when present, how data should be interpreted by the application. +/// +/// Please note the Zenoh protocol does not impose any encoding value nor it operates on it. +/// It can be seen as some optional metadata that is carried over by Zenoh in such a way the application may perform different operations depending on the encoding value. +/// +/// A set of associated constants are provided to cover the most common encodings for user convenience. +/// This is parcticular useful in helping Zenoh to perform additional network optimizations. +/// +/// # Examples +/// +/// ### String operations +/// +/// Create an [`Encoding`] from a string and viceversa. +/// ``` +/// use zenoh::prelude::Encoding; +/// +/// let encoding: Encoding = "text/plain".into(); +/// let text: String = encoding.clone().into(); +/// assert_eq!("text/plain", &text); +/// ``` +/// +/// ### Constants and cow operations +/// +/// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use +/// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain its string representation. 
+/// ``` +/// use zenoh::prelude::Encoding; +/// use std::borrow::Cow; +/// +/// // This allocates +/// assert_eq!("text/plain", &String::from(Encoding::TEXT_PLAIN)); +/// // This does NOT allocate +/// assert_eq!("text/plain", &Cow::from(Encoding::TEXT_PLAIN)); +/// ``` +/// +/// ### Schema +/// +/// Additionally, a schema can be associated to the encoding. +/// The convetions is to use the `;` separator if an encoding is created from a string. +/// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a schme to one of the associated constants. +/// ``` +/// use zenoh::prelude::Encoding; +/// +/// let encoding1 = Encoding::from("text/plain;utf-8"); +/// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8"); +/// assert_eq!(encoding1, encoding2); +/// assert_eq!("text/plain;utf-8", &encoding1.to_string()); +/// assert_eq!("text/plain;utf-8", &encoding2.to_string()); +/// ``` +#[repr(transparent)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Encoding(zenoh_protocol::core::Encoding); + +impl Encoding { + const SCHEMA_SEP: char = ';'; + + // For compatibility purposes Zenoh reserves any prefix value from `0` to `1023` included. + + // - Primitives types supported in all Zenoh bindings + /// Just some bytes. + /// + /// Constant alias for string: `"zenoh/bytes"`. + pub const ZENOH_BYTES: Encoding = Self(zenoh_protocol::core::Encoding { + id: 0, + schema: None, + }); + /// A VLE-encoded signed little-endian integer. Either 8bit, 16bit, 32bit, or 64bit. Binary reprensentation uses two's complement. + /// + /// Constant alias for string: `"zenoh/int"`. + pub const ZENOH_INT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 1, + schema: None, + }); + /// A VLE-encoded little-endian unsigned integer. Either 8bit, 16bit, 32bit, or 64bit. + /// + /// Constant alias for string: `"zenoh/uint"`. + pub const ZENOH_UINT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 2, + schema: None, + }); + /// A VLE-encoded float. 
Either little-endian 32bit or 64bit. Binary representation uses *IEEE 754-2008* *binary32* or *binary64*, respectively. + /// + /// Constant alias for string: `"zenoh/float"`. + pub const ZENOH_FLOAT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 3, + schema: None, + }); + /// A boolean. `0` is `false`, `1` is `true`. Other values are invalid. + /// + /// Constant alias for string: `"zenoh/bool"`. + pub const ZENOH_BOOL: Encoding = Self(zenoh_protocol::core::Encoding { + id: 4, + schema: None, + }); + /// A UTF-8 string. + /// + /// Constant alias for string: `"zenoh/string"`. + pub const ZENOH_STRING: Encoding = Self(zenoh_protocol::core::Encoding { + id: 5, + schema: None, + }); + /// A zenoh error. + /// + /// Constant alias for string: `"zenoh/error"`. + pub const ZENOH_ERROR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 6, + schema: None, + }); + + // - Advanced types may be supported in some of the Zenoh bindings. + /// An application-specific stream of bytes. + /// + /// Constant alias for string: `"application/octet-stream"`. + pub const APPLICATION_OCTET_STREAM: Encoding = Self(zenoh_protocol::core::Encoding { + id: 7, + schema: None, + }); + /// A textual file. + /// + /// Constant alias for string: `"text/plain"`. + pub const TEXT_PLAIN: Encoding = Self(zenoh_protocol::core::Encoding { + id: 8, + schema: None, + }); + /// JSON data intended to be consumed by an application. + /// + /// Constant alias for string: `"application/json"`. + pub const APPLICATION_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 9, + schema: None, + }); + /// JSON data intended to be human readable. + /// + /// Constant alias for string: `"text/json"`. + pub const TEXT_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 10, + schema: None, + }); + /// A Common Data Representation (CDR)-encoded data. + /// + /// Constant alias for string: `"application/cdr"`. 
+ pub const APPLICATION_CDR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 11, + schema: None, + }); + /// A Concise Binary Object Representation (CBOR)-encoded data. + /// + /// Constant alias for string: `"application/cbor"`. + pub const APPLICATION_CBOR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 12, + schema: None, + }); + /// YAML data intended to be consumed by an application. + /// + /// Constant alias for string: `"application/yaml"`. + pub const APPLICATION_YAML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 13, + schema: None, + }); + /// YAML data intended to be human readable. + /// + /// Constant alias for string: `"text/yaml"`. + pub const TEXT_YAML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 14, + schema: None, + }); + /// JSON5 encoded data that are human readable. + /// + /// Constant alias for string: `"text/json5"`. + pub const TEXT_JSON5: Encoding = Self(zenoh_protocol::core::Encoding { + id: 15, + schema: None, + }); + /// A Python object serialized using [pickle](https://docs.python.org/3/library/pickle.html). + /// + /// Constant alias for string: `"application/python-serialized-object"`. + pub const APPLICATION_PYTHON_SERIALIZED_OBJECT: Encoding = + Self(zenoh_protocol::core::Encoding { + id: 16, + schema: None, + }); + /// An application-specific protobuf-encoded data. + /// + /// Constant alias for string: `"application/protobuf"`. + pub const APPLICATION_PROTOBUF: Encoding = Self(zenoh_protocol::core::Encoding { + id: 17, + schema: None, + }); + /// A Java serialized object. + /// + /// Constant alias for string: `"application/java-serialized-object"`. + pub const APPLICATION_JAVA_SERIALIZED_OBJECT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 18, + schema: None, + }); + /// An [openmetrics](https://github.com/OpenObservability/OpenMetrics) data, common used by [Prometheus](https://prometheus.io/). + /// + /// Constant alias for string: `"application/openmetrics-text"`. 
+ pub const APPLICATION_OPENMETRICS_TEXT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 19, + schema: None, + }); + /// A Portable Network Graphics (PNG) image. + /// + /// Constant alias for string: `"image/png"`. + pub const IMAGE_PNG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 20, + schema: None, + }); + /// A Joint Photographic Experts Group (JPEG) image. + /// + /// Constant alias for string: `"image/jpeg"`. + pub const IMAGE_JPEG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 21, + schema: None, + }); + /// A Graphics Interchange Format (GIF) image. + /// + /// Constant alias for string: `"image/gif"`. + pub const IMAGE_GIF: Encoding = Self(zenoh_protocol::core::Encoding { + id: 22, + schema: None, + }); + /// A BitMap (BMP) image. + /// + /// Constant alias for string: `"image/bmp"`. + pub const IMAGE_BMP: Encoding = Self(zenoh_protocol::core::Encoding { + id: 23, + schema: None, + }); + /// A Web Protable (WebP) image. + /// + /// Constant alias for string: `"image/webp"`. + pub const IMAGE_WEBP: Encoding = Self(zenoh_protocol::core::Encoding { + id: 24, + schema: None, + }); + /// An XML file intended to be consumed by an application.. + /// + /// Constant alias for string: `"application/xml"`. + pub const APPLICATION_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 25, + schema: None, + }); + /// An encoded a list of tuples, each consisting of a name and a value. + /// + /// Constant alias for string: `"application/x-www-form-urlencoded"`. + pub const APPLICATION_X_WWW_FORM_URLENCODED: Encoding = Self(zenoh_protocol::core::Encoding { + id: 26, + schema: None, + }); + /// An HTML file. + /// + /// Constant alias for string: `"text/html"`. + pub const TEXT_HTML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 27, + schema: None, + }); + /// An XML file that is human readable. + /// + /// Constant alias for string: `"text/xml"`. 
+ pub const TEXT_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 28, + schema: None, + }); + /// A CSS file. + /// + /// Constant alias for string: `"text/css"`. + pub const TEXT_CSS: Encoding = Self(zenoh_protocol::core::Encoding { + id: 29, + schema: None, + }); + /// A JavaScript file. + /// + /// Constant alias for string: `"text/javascript"`. + pub const TEXT_JAVASCRIPT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 30, + schema: None, + }); + /// A MarkDown file. + /// + /// Constant alias for string: `"text/markdown"`. + pub const TEXT_MARKDOWN: Encoding = Self(zenoh_protocol::core::Encoding { + id: 31, + schema: None, + }); + /// A CSV file. + /// + /// Constant alias for string: `"text/csv"`. + pub const TEXT_CSV: Encoding = Self(zenoh_protocol::core::Encoding { + id: 32, + schema: None, + }); + /// An application-specific SQL query. + /// + /// Constant alias for string: `"application/sql"`. + pub const APPLICATION_SQL: Encoding = Self(zenoh_protocol::core::Encoding { + id: 33, + schema: None, + }); + /// Constrained Application Protocol (CoAP) data intended for CoAP-to-HTTP and HTTP-to-CoAP proxies. + /// + /// Constant alias for string: `"application/coap-payload"`. + pub const APPLICATION_COAP_PAYLOAD: Encoding = Self(zenoh_protocol::core::Encoding { + id: 34, + schema: None, + }); + /// Defines a JSON document structure for expressing a sequence of operations to apply to a JSON document. + /// + /// Constant alias for string: `"application/json-patch+json"`. + pub const APPLICATION_JSON_PATCH_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 35, + schema: None, + }); + /// A JSON text sequence consists of any number of JSON texts, all encoded in UTF-8. + /// + /// Constant alias for string: `"application/json-seq"`. 
+ pub const APPLICATION_JSON_SEQ: Encoding = Self(zenoh_protocol::core::Encoding { + id: 36, + schema: None, + }); + /// A JSONPath defines a string syntax for selecting and extracting JSON values from within a given JSON value. + /// + /// Constant alias for string: `"application/jsonpath"`. + pub const APPLICATION_JSONPATH: Encoding = Self(zenoh_protocol::core::Encoding { + id: 37, + schema: None, + }); + /// A JSON Web Token (JWT). + /// + /// Constant alias for string: `"application/jwt"`. + pub const APPLICATION_JWT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 38, + schema: None, + }); + /// An application-specific MPEG-4 encoded data, either audio or video. + /// + /// Constant alias for string: `"application/mp4"`. + pub const APPLICATION_MP4: Encoding = Self(zenoh_protocol::core::Encoding { + id: 39, + schema: None, + }); + /// A SOAP 1.2 message serialized as XML 1.0. + /// + /// Constant alias for string: `"application/soap+xml"`. + pub const APPLICATION_SOAP_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 40, + schema: None, + }); + /// A YANG-encoded data commonly used by the Network Configuration Protocol (NETCONF). + /// + /// Constant alias for string: `"application/yang"`. + pub const APPLICATION_YANG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 41, + schema: None, + }); + /// A MPEG-4 Advanced Audio Coding (AAC) media. + /// + /// Constant alias for string: `"audio/aac"`. + pub const AUDIO_AAC: Encoding = Self(zenoh_protocol::core::Encoding { + id: 42, + schema: None, + }); + /// A Free Lossless Audio Codec (FLAC) media. + /// + /// Constant alias for string: `"audio/flac"`. + pub const AUDIO_FLAC: Encoding = Self(zenoh_protocol::core::Encoding { + id: 43, + schema: None, + }); + /// An audio codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. + /// + /// Constant alias for string: `"audio/mp4"`. 
+ pub const AUDIO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { + id: 44, + schema: None, + }); + /// An Ogg-encapsulated audio stream. + /// + /// Constant alias for string: `"audio/ogg"`. + pub const AUDIO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 45, + schema: None, + }); + /// A Vorbis-encoded audio stream. + /// + /// Constant alias for string: `"audio/vorbis"`. + pub const AUDIO_VORBIS: Encoding = Self(zenoh_protocol::core::Encoding { + id: 46, + schema: None, + }); + /// A h261-encoded video stream. + /// + /// Constant alias for string: `"video/h261"`. + pub const VIDEO_H261: Encoding = Self(zenoh_protocol::core::Encoding { + id: 47, + schema: None, + }); + /// A h263-encoded video stream. + /// + /// Constant alias for string: `"video/h263"`. + pub const VIDEO_H263: Encoding = Self(zenoh_protocol::core::Encoding { + id: 48, + schema: None, + }); + /// A h264-encoded video stream. + /// + /// Constant alias for string: `"video/h264"`. + pub const VIDEO_H264: Encoding = Self(zenoh_protocol::core::Encoding { + id: 49, + schema: None, + }); + /// A h265-encoded video stream. + /// + /// Constant alias for string: `"video/h265"`. + pub const VIDEO_H265: Encoding = Self(zenoh_protocol::core::Encoding { + id: 50, + schema: None, + }); + /// A h266-encoded video stream. + /// + /// Constant alias for string: `"video/h266"`. + pub const VIDEO_H266: Encoding = Self(zenoh_protocol::core::Encoding { + id: 51, + schema: None, + }); + /// A video codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. + /// + /// Constant alias for string: `"video/mp4"`. + pub const VIDEO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { + id: 52, + schema: None, + }); + /// An Ogg-encapsulated video stream. + /// + /// Constant alias for string: `"video/ogg"`. + pub const VIDEO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 53, + schema: None, + }); + /// An uncompressed, studio-quality video stream. 
+ /// + /// Constant alias for string: `"video/raw"`. + pub const VIDEO_RAW: Encoding = Self(zenoh_protocol::core::Encoding { + id: 54, + schema: None, + }); + /// A VP8-encoded video stream. + /// + /// Constant alias for string: `"video/vp8"`. + pub const VIDEO_VP8: Encoding = Self(zenoh_protocol::core::Encoding { + id: 55, + schema: None, + }); + /// A VP9-encoded video stream. + /// + /// Constant alias for string: `"video/vp9"`. + pub const VIDEO_VP9: Encoding = Self(zenoh_protocol::core::Encoding { + id: 56, + schema: None, + }); + + const ID_TO_STR: phf::Map = phf_map! { + 0u16 => "zenoh/bytes", + 1u16 => "zenoh/int", + 2u16 => "zenoh/uint", + 3u16 => "zenoh/float", + 4u16 => "zenoh/bool", + 5u16 => "zenoh/string", + 6u16 => "zenoh/error", + 7u16 => "application/octet-stream", + 8u16 => "text/plain", + 9u16 => "application/json", + 10u16 => "text/json", + 11u16 => "application/cdr", + 12u16 => "application/cbor", + 13u16 => "application/yaml", + 14u16 => "text/yaml", + 15u16 => "text/json5", + 16u16 => "application/python-serialized-object", + 17u16 => "application/protobuf", + 18u16 => "application/java-serialized-object", + 19u16 => "application/openmetrics-text", + 20u16 => "image/png", + 21u16 => "image/jpeg", + 22u16 => "image/gif", + 23u16 => "image/bmp", + 24u16 => "image/webp", + 25u16 => "application/xml", + 26u16 => "application/x-www-form-urlencoded", + 27u16 => "text/html", + 28u16 => "text/xml", + 29u16 => "text/css", + 30u16 => "text/javascript", + 31u16 => "text/markdown", + 32u16 => "text/csv", + 33u16 => "application/sql", + 34u16 => "application/coap-payload", + 35u16 => "application/json-patch+json", + 36u16 => "application/json-seq", + 37u16 => "application/jsonpath", + 38u16 => "application/jwt", + 39u16 => "application/mp4", + 40u16 => "application/soap+xml", + 41u16 => "application/yang", + 42u16 => "audio/aac", + 43u16 => "audio/flac", + 44u16 => "audio/mp4", + 45u16 => "audio/ogg", + 46u16 => "audio/vorbis", + 47u16 => "video/h261", 
+ 48u16 => "video/h263", + 49u16 => "video/h264", + 50u16 => "video/h265", + 51u16 => "video/h266", + 52u16 => "video/mp4", + 53u16 => "video/ogg", + 54u16 => "video/raw", + 55u16 => "video/vp8", + 56u16 => "video/vp9", + }; + + const STR_TO_ID: phf::Map<&'static str, EncodingId> = phf_map! { + "zenoh/bytes" => 0u16, + "zenoh/int" => 1u16, + "zenoh/uint" => 2u16, + "zenoh/float" => 3u16, + "zenoh/bool" => 4u16, + "zenoh/string" => 5u16, + "zenoh/error" => 6u16, + "application/octet-stream" => 7u16, + "text/plain" => 8u16, + "application/json" => 9u16, + "text/json" => 10u16, + "application/cdr" => 11u16, + "application/cbor" => 12u16, + "application/yaml" => 13u16, + "text/yaml" => 14u16, + "text/json5" => 15u16, + "application/python-serialized-object" => 16u16, + "application/protobuf" => 17u16, + "application/java-serialized-object" => 18u16, + "application/openmetrics-text" => 19u16, + "image/png" => 20u16, + "image/jpeg" => 21u16, + "image/gif" => 22u16, + "image/bmp" => 23u16, + "image/webp" => 24u16, + "application/xml" => 25u16, + "application/x-www-form-urlencoded" => 26u16, + "text/html" => 27u16, + "text/xml" => 28u16, + "text/css" => 29u16, + "text/javascript" => 30u16, + "text/markdown" => 31u16, + "text/csv" => 32u16, + "application/sql" => 33u16, + "application/coap-payload" => 34u16, + "application/json-patch+json" => 35u16, + "application/json-seq" => 36u16, + "application/jsonpath" => 37u16, + "application/jwt" => 38u16, + "application/mp4" => 39u16, + "application/soap+xml" => 40u16, + "application/yang" => 41u16, + "audio/aac" => 42u16, + "audio/flac" => 43u16, + "audio/mp4" => 44u16, + "audio/ogg" => 45u16, + "audio/vorbis" => 46u16, + "video/h261" => 47u16, + "video/h263" => 48u16, + "video/h264" => 49u16, + "video/h265" => 50u16, + "video/h266" => 51u16, + "video/mp4" => 52u16, + "video/ogg" => 53u16, + "video/raw" => 54u16, + "video/vp8" => 55u16, + "video/vp9" => 56u16, + }; + + /// The default [`Encoding`] is 
[`ZENOH_BYTES`](Encoding::ZENOH_BYTES). + pub const fn default() -> Self { + Self::ZENOH_BYTES + } + + /// Set a schema to this encoding. Zenoh does not define what a schema is and its semantichs is left to the implementer. + /// E.g. a common schema for `text/plain` encoding is `utf-8`. + pub fn with_schema(mut self, s: S) -> Self + where + S: Into, + { + let s: String = s.into(); + self.0.schema = Some(s.into_boxed_str().into_boxed_bytes().into()); + self + } +} + +impl Default for Encoding { + fn default() -> Self { + Self::default() + } +} + +impl From<&str> for Encoding { + fn from(t: &str) -> Self { + let mut inner = zenoh_protocol::core::Encoding::empty(); + + // Check if empty + if t.is_empty() { + return Encoding(inner); + } + + // Everything before `;` may be mapped to a known id + let (id, schema) = t.split_once(Encoding::SCHEMA_SEP).unwrap_or((t, "")); + if let Some(id) = Encoding::STR_TO_ID.get(id).copied() { + inner.id = id; + }; + if !schema.is_empty() { + inner.schema = Some(ZSlice::from(schema.to_string().into_bytes())); + } + + Encoding(inner) + } +} + +impl From for Encoding { + fn from(value: String) -> Self { + Self::from(value.as_str()) + } +} + +impl FromStr for Encoding { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + Ok(Self::from(s)) + } +} + +impl From<&Encoding> for Cow<'static, str> { + fn from(encoding: &Encoding) -> Self { + fn su8_to_str(schema: &[u8]) -> &str { + std::str::from_utf8(schema).unwrap_or("unknown(non-utf8)") + } + + match ( + Encoding::ID_TO_STR.get(&encoding.0.id).copied(), + encoding.0.schema.as_ref(), + ) { + // Perfect match + (Some(i), None) => Cow::Borrowed(i), + // ID and schema + (Some(i), Some(s)) => { + Cow::Owned(format!("{}{}{}", i, Encoding::SCHEMA_SEP, su8_to_str(s))) + } + // + (None, Some(s)) => Cow::Owned(format!( + "unknown({}){}{}", + encoding.0.id, + Encoding::SCHEMA_SEP, + su8_to_str(s) + )), + (None, None) => Cow::Owned(format!("unknown({})", encoding.0.id)), + } + } +} + +impl 
From<Encoding> for Cow<'static, str> { + fn from(encoding: Encoding) -> Self { + Self::from(&encoding) + } +} + +impl From<Encoding> for String { + fn from(encoding: Encoding) -> Self { + encoding.to_string() + } +} + +impl From<Encoding> for zenoh_protocol::core::Encoding { + fn from(value: Encoding) -> Self { + value.0 + } +} + +impl From<zenoh_protocol::core::Encoding> for Encoding { + fn from(value: zenoh_protocol::core::Encoding) -> Self { + Self(value) + } +}
+ +impl EncodingMapping for i16 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for i32 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for i64 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for isize { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +// Zenoh floats +impl EncodingMapping for f32 { + const ENCODING: Encoding = Encoding::ZENOH_FLOAT; +} + +impl EncodingMapping for f64 { + const ENCODING: Encoding = Encoding::ZENOH_FLOAT; +} + +// Zenoh bool +impl EncodingMapping for bool { + const ENCODING: Encoding = Encoding::ZENOH_BOOL; +} + +// - Zenoh advanced types encoders/decoders +impl EncodingMapping for serde_json::Value { + const ENCODING: Encoding = Encoding::APPLICATION_JSON; +} + +impl EncodingMapping for serde_yaml::Value { + const ENCODING: Encoding = Encoding::APPLICATION_YAML; +} + +impl EncodingMapping for serde_cbor::Value { + const ENCODING: Encoding = Encoding::APPLICATION_CBOR; +} + +impl EncodingMapping for serde_pickle::Value { + const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT; +} + +// - Zenoh SHM +#[cfg(feature = "shared-memory")] +impl EncodingMapping for Arc { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +#[cfg(feature = "shared-memory")] +impl EncodingMapping for Box { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +#[cfg(feature = "shared-memory")] +impl EncodingMapping for SharedMemoryBuf { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 0a8f1feb64..bae81d3a54 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -53,7 +53,7 @@ //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); //! while let Ok(sample) = subscriber.recv_async().await { -//! println!("Received: {}", sample); +//! println!("Received: {:?}", sample); //! 
}; //! } //! ``` @@ -79,9 +79,11 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; +pub(crate) type Id = usize; + use git_version::git_version; use handlers::DefaultHandler; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; @@ -132,10 +134,12 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; +pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; +pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; @@ -168,23 +172,6 @@ pub mod time { } } -/// A map of key/value (String,String) properties. -pub mod properties { - use super::prelude::Value; - pub use zenoh_collections::Properties; - - /// Convert a set of [`Properties`] into a [`Value`]. - /// For instance, Properties: `[("k1", "v1"), ("k2, v2")]` - /// is converted into Json: `{ "k1": "v1", "k2": "v2" }` - pub fn properties_to_json_value(props: &Properties) -> Value { - let json_map = props - .iter() - .map(|(k, v)| (k.clone(), serde_json::Value::String(v.clone()))) - .collect::>(); - serde_json::Value::Object(json_map).into() - } -} - /// Scouting primitives. pub mod scouting; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 26a803fa43..9f14866363 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -15,8 +15,7 @@ //! Liveliness primitives. //! //! 
see [`Liveliness`] - -use crate::query::Reply; +use crate::{query::Reply, Id}; #[zenoh_macros::unstable] use { @@ -426,7 +425,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .res() /// .await /// .unwrap(); @@ -500,7 +499,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {}", sample.key_expr, sample.value); + /// println!("Received: {} {:?}", sample.key_expr, sample.payload); /// } /// # }) /// ``` @@ -508,7 +507,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> LivelinessSubscriberBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::handlers::IntoCallbackReceiverPair<'static, Sample>, { let LivelinessSubscriberBuilder { session, @@ -594,8 +593,8 @@ where /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { -/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str(),), -/// Err(err) => println!("Received (ERROR: '{}')", String::try_from(&err).unwrap()), +/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str()), +/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), /// } /// } /// # }) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index e8e84395f8..b0f7f7f7ef 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -559,7 +559,7 @@ pub fn 
route_query( payload: ReplyBody::Put(Put { // @TODO: handle Del case timestamp: None, // @TODO: handle timestamp - encoding: Encoding::DEFAULT, // @TODO: handle encoding + encoding: Encoding::empty(), // @TODO: handle encoding ext_sinfo: None, // @TODO: handle source info ext_attachment: None, // @TODO: expose it in the API #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index cd7cf448cd..03b447aae0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,8 +12,10 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; +use crate::encoding::Encoding; use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; +use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::{Sample, SyncResolve}; use crate::queryable::Query; @@ -30,9 +32,11 @@ use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; -use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, ExprId, KnownEncoding, WireExpr, ZenohId, EMPTY_EXPR_ID}, + core::{ + key_expr::{keyexpr, OwnedKeyExpr}, + ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, + }, network::{ declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, @@ -420,7 +424,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).encoding(b.encoding)), + .map(|b| Value::from(b.payload).with_encoding(b.encoding)), qid: msg.id, zid, primitives, @@ -561,13 +565,18 @@ fn router_data(context: &AdminContext, query: Query) { } log::trace!("AdminSpace router_data: {:?}", json); + let payload = match Payload::try_from(json) { + Ok(p) => p, + Err(e) => 
{ + log::error!("Error serializing AdminSpace reply: {:?}", e); + return; + } + }; if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from(json.to_string().as_bytes().to_vec()) - .encoding(KnownEncoding::AppJson.into()), - ))) - .res() + .reply(Ok( + Sample::new(reply_key, payload).with_encoding(Encoding::APPLICATION_JSON) + )) + .res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); } @@ -596,13 +605,7 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from(metrics.as_bytes().to_vec()).encoding(KnownEncoding::TextPlain.into()), - ))) - .res() - { + if let Err(e) = query.reply(Ok(Sample::new(reply_key, metrics))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -617,14 +620,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(Ok(Sample::new( reply_key, - Value::from( - tables - .hat_code - .info(&tables, WhatAmI::Router) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), + tables.hat_code.info(&tables, WhatAmI::Router), ))) .res() { @@ -642,14 +638,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(Ok(Sample::new( reply_key, - Value::from( - tables - .hat_code - .info(&tables, WhatAmI::Peer) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), + tables.hat_code.info(&tables, WhatAmI::Peer), ))) .res() { @@ -667,7 +656,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -684,7 +673,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = 
query.reply(Ok(Sample::new(key, Value::empty()))).res() { + if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -702,8 +691,13 @@ fn plugins_data(context: &AdminContext, query: Query) { log::debug!("plugin status: {:?}", status); let key = root_key.join(status.name()).unwrap(); let status = serde_json::to_value(status).unwrap(); - if let Err(e) = query.reply(Ok(Sample::new(key, Value::from(status)))).res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + match Payload::try_from(status) { + Ok(zbuf) => { + if let Err(e) = query.reply(Ok(Sample::new(key, zbuf))).res_sync() { + log::error!("Error sending AdminSpace reply: {:?}", e); + } + } + Err(e) => log::debug!("Admin query error: {}", e), } } } @@ -720,12 +714,7 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query - .reply(Ok(Sample::new( - key_expr, - Value::from(plugin.path()).encoding(KnownEncoding::AppJson.into()), - ))) - .res() + if let Err(e) = query.reply(Ok(Sample::new(key_expr, plugin.path()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } @@ -748,13 +737,13 @@ fn plugins_status(context: &AdminContext, query: Query) { Ok(Ok(responses)) => { for response in responses { if let Ok(key_expr) = KeyExpr::try_from(response.key) { - if let Err(e) = query.reply(Ok(Sample::new( - key_expr, - Value::from(response.value).encoding(KnownEncoding::AppJson.into()), - ))) - .res() - { - log::error!("Error sending AdminSpace reply: {:?}", e); + match Payload::try_from(response.value) { + Ok(zbuf) => { + if let Err(e) = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync() { + log::error!("Error sending AdminSpace reply: {:?}", e); + } + }, + Err(e) => log::debug!("Admin query error: {}", e), } 
} else { log::error!("Error: plugin {} replied with an invalid key", plugin_key); diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index fdf0b6fe65..80a9dd458a 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -628,7 +628,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -661,7 +661,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -694,7 +694,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -727,7 +727,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -760,7 +760,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs new file mode 100644 index 0000000000..f499db50da --- /dev/null +++ b/zenoh/src/payload.rs @@ -0,0 +1,673 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Payload primitives. +use crate::buffers::ZBuf; +use std::{ + borrow::Cow, + convert::Infallible, + fmt::Debug, + ops::{Deref, DerefMut}, + string::FromUtf8Error, + sync::Arc, +}; +use zenoh_buffers::{buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZSlice}; +use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use zenoh_shm::SharedMemoryBuf; + +#[repr(transparent)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct Payload(ZBuf); + +impl Payload { + /// Create an empty payload. + pub const fn empty() -> Self { + Self(ZBuf::empty()) + } + + /// Create a [`Payload`] from any type `T` that can implements [`Into`]. + pub fn new(t: T) -> Self + where + T: Into, + { + Self(t.into()) + } +} + +impl Deref for Payload { + type Target = ZBuf; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Payload { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +/// Provide some facilities specific to the Rust API to encode/decode a [`Value`] with an `Serialize`. +impl Payload { + /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. + /// + /// ```rust + /// use zenoh::payload::Payload; + /// + /// let start = String::from("abc"); + /// let payload = Payload::serialize(start.clone()); + /// let end: String = payload.deserialize().unwrap(); + /// assert_eq!(start, end); + /// ``` + pub fn serialize(t: T) -> Self + where + ZSerde: Serialize, + { + ZSerde.serialize(t) + } + + /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// See [encode](Value::encode) for an example. + pub fn deserialize(&self) -> ZResult + where + ZSerde: Deserialize, + >::Error: Debug, + { + let t: T = ZSerde.deserialize(self).map_err(|e| zerror!("{:?}", e))?; + Ok(t) + } +} + +/// Trait to encode a type `T` into a [`Value`]. 
+pub trait Serialize { + type Output; + + /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. + fn serialize(self, t: T) -> Self::Output; +} + +pub trait Deserialize { + type Error; + + /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. + fn deserialize(self, t: &Payload) -> Result; +} + +/// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. +/// It also supports common Rust serde values. +#[derive(Clone, Copy, Debug)] +pub struct ZSerde; + +#[derive(Debug, Clone, Copy)] +pub struct ZDeserializeError; + +// Bytes +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, t: ZBuf) -> Self::Output { + Payload::new(t) + } +} + +impl From for ZBuf { + fn from(value: Payload) -> Self { + value.0 + } +} + +impl Deserialize for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result { + Ok(v.into()) + } +} + +impl From<&Payload> for ZBuf { + fn from(value: &Payload) -> Self { + value.0.clone() + } +} + +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Vec) -> Self::Output { + Payload::new(t) + } +} + +impl Serialize<&[u8]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &[u8]) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl Deserialize> for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result, Self::Error> { + let v: ZBuf = v.into(); + Ok(v.contiguous().to_vec()) + } +} + +impl From<&Payload> for Vec { + fn from(value: &Payload) -> Self { + value.contiguous().to_vec() + } +} + +impl<'a> Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl<'a> Deserialize> for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result, Self::Error> { + let v: Vec = 
Self.deserialize(v)?; + Ok(Cow::Owned(v)) + } +} + +impl<'a> From<&'a Payload> for Cow<'a, [u8]> { + fn from(value: &'a Payload) -> Self { + value.contiguous() + } +} + +// String +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, s: String) -> Self::Output { + Payload::new(s.into_bytes()) + } +} + +impl Serialize<&str> for ZSerde { + type Output = Payload; + + fn serialize(self, s: &str) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl Deserialize for ZSerde { + type Error = FromUtf8Error; + + fn deserialize(self, v: &Payload) -> Result { + String::from_utf8(v.contiguous().to_vec()) + } +} + +impl TryFrom<&Payload> for String { + type Error = FromUtf8Error; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom for String { + type Error = FromUtf8Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + +impl<'a> Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, s: Cow<'a, str>) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl<'a> Deserialize> for ZSerde { + type Error = FromUtf8Error; + + fn deserialize(self, v: &Payload) -> Result, Self::Error> { + let v: String = Self.deserialize(v)?; + Ok(Cow::Owned(v)) + } +} + +impl TryFrom<&Payload> for Cow<'_, str> { + type Error = FromUtf8Error; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + +// - Integers impl +macro_rules! 
impl_int { + ($t:ty, $encoding:expr) => { + impl Serialize<$t> for ZSerde { + type Output = Payload; + + fn serialize(self, t: $t) -> Self::Output { + let bs = t.to_le_bytes(); + let end = 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); + // SAFETY: + // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 + // - end is a valid end index because is bounded between 0 and bs.len() + Payload::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) + } + } + + impl Serialize<&$t> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &$t) -> Self::Output { + Self.serialize(*t) + } + } + + impl Serialize<&mut $t> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl Deserialize<$t> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { + let p = v.contiguous(); + let mut bs = (0 as $t).to_le_bytes(); + if p.len() > bs.len() { + return Err(ZDeserializeError); + } + bs[..p.len()].copy_from_slice(&p); + let t = <$t>::from_le_bytes(bs); + Ok(t) + } + } + + impl TryFrom<&Payload> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } + } + }; +} + +// Zenoh unsigned integers +impl_int!(u8, ZSerde::ZENOH_UINT); +impl_int!(u16, ZSerde::ZENOH_UINT); +impl_int!(u32, ZSerde::ZENOH_UINT); +impl_int!(u64, ZSerde::ZENOH_UINT); +impl_int!(usize, ZSerde::ZENOH_UINT); + +// Zenoh signed integers +impl_int!(i8, ZSerde::ZENOH_INT); +impl_int!(i16, ZSerde::ZENOH_INT); +impl_int!(i32, ZSerde::ZENOH_INT); +impl_int!(i64, ZSerde::ZENOH_INT); +impl_int!(isize, ZSerde::ZENOH_INT); + +// Zenoh floats +impl_int!(f32, ZSerde::ZENOH_FLOAT); +impl_int!(f64, ZSerde::ZENOH_FLOAT); + +// Zenoh bool +impl Serialize for ZSerde { + type Output = ZBuf; + + fn serialize(self, t: bool) -> Self::Output { + // SAFETY: casting a bool into an 
integer is well-defined behaviour. + // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html + ZBuf::from((t as u8).to_le_bytes()) + } +} + +impl Deserialize for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result { + let p = v.contiguous(); + if p.len() != 1 { + return Err(ZDeserializeError); + } + match p[0] { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(ZDeserializeError), + } + } +} + +impl TryFrom<&Payload> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + +// - Zenoh advanced types encoders/decoders +// JSON +impl Serialize<&serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_json::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_json::to_writer(payload.writer(), t)?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_json::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_json::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_json::from_reader(v.reader()) + } +} + +impl TryFrom for Payload { + type Error = serde_json::Error; + + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + +// Yaml +impl Serialize<&serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_yaml::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_yaml::to_writer(payload.writer(), t)?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_yaml::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_yaml::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_yaml::from_reader(v.reader()) + } +} + +impl TryFrom for Payload { + type Error = serde_yaml::Error; + + fn 
try_from(value: serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +// CBOR +impl Serialize<&serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_cbor::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_cbor::to_writer(payload.writer(), t)?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_cbor::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_cbor::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_cbor::from_reader(v.reader()) + } +} + +impl TryFrom for Payload { + type Error = serde_cbor::Error; + + fn try_from(value: serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +// Pickle +impl Serialize<&serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_pickle::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_pickle::value_to_writer( + &mut payload.writer(), + t, + serde_pickle::SerOptions::default(), + )?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_pickle::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_pickle::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) + } +} + +impl TryFrom for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + +// Shared memory conversion +#[cfg(feature = "shared-memory")] +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Arc) -> Self::Output { + Payload::new(t) + } +} + +#[cfg(feature = "shared-memory")] +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Box) -> Self::Output { + let smb: Arc = t.into(); + 
Self.serialize(smb) + } +} + +#[cfg(feature = "shared-memory")] +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, t: SharedMemoryBuf) -> Self::Output { + Payload::new(t) + } +} + +impl From for Payload +where + ZSerde: Serialize, +{ + fn from(t: T) -> Self { + ZSerde.serialize(t) + } +} + +// For convenience to always convert a Value the examples +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StringOrBase64 { + String(String), + Base64(String), +} + +impl Deref for StringOrBase64 { + type Target = String; + + fn deref(&self) -> &Self::Target { + match self { + Self::String(s) | Self::Base64(s) => s, + } + } +} + +impl std::fmt::Display for StringOrBase64 { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self) + } +} + +impl From for StringOrBase64 { + fn from(v: Payload) -> Self { + use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; + match v.deserialize::() { + Ok(s) => StringOrBase64::String(s), + Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.contiguous())), + } + } +} + +mod tests { + #[test] + fn serializer() { + use super::Payload; + use rand::Rng; + use zenoh_buffers::ZBuf; + + const NUM: usize = 1_000; + + macro_rules! 
serialize_deserialize { + ($t:ty, $in:expr) => { + let i = $in; + let t = i.clone(); + let v = Payload::serialize(t); + let o: $t = v.deserialize().unwrap(); + assert_eq!(i, o) + }; + } + + let mut rng = rand::thread_rng(); + + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } + + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } + + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); + + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); + + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, rng.gen::()); + } + + serialize_deserialize!(String, ""); + serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + + serialize_deserialize!(Vec, vec![0u8; 
0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } +} diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index ad28470f63..59a4bbd96e 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -31,21 +31,20 @@ pub(crate) mod common { writer::HasWriter, }; pub use zenoh_core::Resolve; - - pub(crate) type Id = usize; + pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; pub use crate::config::{self, Config, ValidatedMap}; pub use crate::handlers::IntoCallbackReceiverPair; - pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; - pub use crate::query::{QueryConsolidation, QueryTarget}; + pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::selector::{Parameter, Parameters, Selector}; - pub use crate::value::Value; + pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. - pub use zenoh_protocol::core::{Encoding, KnownEncoding}; + pub use crate::payload::{Deserialize, Payload, Serialize}; + pub use crate::value::Value; - pub use crate::query::ConsolidationMode; #[zenoh_macros::unstable] pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] @@ -56,13 +55,6 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - - /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. - pub use zenoh_protocol::core::EndPoint; - /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. - pub use zenoh_protocol::core::Locator; - /// The global unique id of a zenoh peer. - pub use zenoh_protocol::core::ZenohId; } /// Prelude to import when using Zenoh's sync API. 
diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 4d45c3919d..9fb4bdf6c3 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,21 +13,24 @@ // //! Publishing primitives. -#[zenoh_macros::unstable] -use crate::handlers::Callback; -#[zenoh_macros::unstable] -use crate::handlers::DefaultHandler; +use crate::encoding::Encoding; +use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::prelude::*; +use crate::payload::Payload; #[zenoh_macros::unstable] use crate::sample::Attachment; -use crate::sample::DataInfo; -use crate::sample::QoS; -use crate::Encoding; +use crate::sample::{DataInfo, QoS, Sample, SampleKind}; +use crate::Locality; use crate::SessionRef; use crate::Undeclarable; +#[cfg(feature = "unstable")] +use crate::{ + handlers::{Callback, DefaultHandler, IntoCallbackReceiverPair}, + Id, +}; use std::future::Ready; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_keyexpr::keyexpr; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; @@ -67,8 +70,8 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session -/// .put("key/expression", "value") -/// .encoding(KnownEncoding::TextPlain) +/// .put("key/expression", "payload") +/// .with_encoding(Encoding::TEXT_PLAIN) /// .congestion_control(CongestionControl::Block) /// .res() /// .await @@ -79,22 +82,14 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; #[derive(Debug, Clone)] pub struct PutBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, - pub(crate) value: Value, + pub(crate) payload: Payload, pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } impl PutBuilder<'_, '_> { - /// Change the encoding of the written data. 
- #[inline] - pub fn encoding(mut self, encoding: IntoEncoding) -> Self - where - IntoEncoding: Into, - { - self.value.encoding = encoding.into(); - self - } /// Change the `congestion_control` to apply when routing the data. #[inline] pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { @@ -118,12 +113,18 @@ impl PutBuilder<'_, '_> { self } - pub fn kind(mut self, kind: SampleKind) -> Self { - self.kind = kind; + /// Set the [`Encoding`] of the written data. + #[inline] + pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self + where + IntoEncoding: Into, + { + self.encoding = encoding.into(); self } #[zenoh_macros::unstable] + /// Attach user-provided data to the written data. pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); self @@ -155,8 +156,9 @@ impl SyncResolve for PutBuilder<'_, '_> { resolve_put( &publisher, - self.value, + self.payload, self.kind, + self.encoding, #[cfg(feature = "unstable")] self.attachment, ) @@ -308,11 +310,12 @@ impl<'a> Publisher<'a> { std::sync::Arc::new(self) } - fn _write(&self, kind: SampleKind, value: Value) -> Publication { + fn _write(&self, kind: SampleKind, payload: Payload) -> Publication { Publication { publisher: self, - value, + payload, kind, + encoding: Encoding::ZENOH_BYTES, #[cfg(feature = "unstable")] attachment: None, } @@ -327,12 +330,12 @@ impl<'a> Publisher<'a> { /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.write(SampleKind::Put, "value").res().await.unwrap(); + /// publisher.write(SampleKind::Put, "payload").res().await.unwrap(); /// # }) /// ``` - pub fn write(&self, kind: SampleKind, value: IntoValue) -> Publication + pub fn write(&self, kind: SampleKind, value: IntoPayload) -> Publication where - IntoValue: Into, + IntoPayload: Into, { self._write(kind, value.into()) } @@ 
-350,11 +353,11 @@ impl<'a> Publisher<'a> { /// # }) /// ``` #[inline] - pub fn put(&self, value: IntoValue) -> Publication + pub fn put(&self, payload: IntoPayload) -> Publication where - IntoValue: Into, + IntoPayload: Into, { - self._write(SampleKind::Put, value.into()) + self._write(SampleKind::Put, payload.into()) } /// Delete data. @@ -370,7 +373,7 @@ impl<'a> Publisher<'a> { /// # }) /// ``` pub fn delete(&self) -> Publication { - self._write(SampleKind::Delete, Value::empty()) + self._write(SampleKind::Delete, Payload::empty()) } /// Return the [`MatchingStatus`] of the publisher. @@ -597,13 +600,19 @@ impl Drop for Publisher<'_> { #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct Publication<'a> { publisher: &'a Publisher<'a>, - value: Value, + payload: Payload, kind: SampleKind, + encoding: Encoding, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } impl<'a> Publication<'a> { + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self + } + #[zenoh_macros::unstable] pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); @@ -619,8 +628,9 @@ impl SyncResolve for Publication<'_> { fn res_sync(self) -> ::To { resolve_put( self.publisher, - self.value, + self.payload, self.kind, + self.encoding, #[cfg(feature = "unstable")] self.attachment, ) @@ -635,10 +645,7 @@ impl AsyncResolve for Publication<'_> { } } -impl<'a, IntoValue> Sink for Publisher<'a> -where - IntoValue: Into, -{ +impl<'a> Sink for Publisher<'a> { type Error = Error; #[inline] @@ -647,8 +654,16 @@ where } #[inline] - fn start_send(self: Pin<&mut Self>, item: IntoValue) -> Result<(), Self::Error> { - self.put(item.into()).res_sync() + fn start_send(self: Pin<&mut Self>, item: Sample) -> Result<(), Self::Error> { + Publication { + publisher: &self, + payload: item.payload, + kind: item.kind, + encoding: 
item.encoding, + #[cfg(feature = "unstable")] + attachment: item.attachment, + } + .res_sync() } #[inline] @@ -791,8 +806,9 @@ impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { fn resolve_put( publisher: &Publisher<'_>, - value: Value, + payload: Payload, kind: SampleKind, + encoding: Encoding, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -825,13 +841,13 @@ fn resolve_put( } PushBody::Put(Put { timestamp, - encoding: value.encoding.clone(), + encoding: encoding.clone().into(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, ext_attachment, ext_unknown: vec![], - payload: value.payload.clone(), + payload: payload.clone().into(), }) } SampleKind::Delete => { @@ -856,7 +872,7 @@ fn resolve_put( if publisher.destination != Locality::Remote { let data_info = DataInfo { kind, - encoding: Some(value.encoding), + encoding: Some(encoding), timestamp, source_id: None, source_sn: None, @@ -871,7 +887,7 @@ fn resolve_put( true, &publisher.key_expr.to_wire(&publisher.session), Some(data_info), - value.payload, + payload.into(), #[cfg(feature = "unstable")] attachment, ); @@ -1366,7 +1382,7 @@ mod tests { let sample = sub.recv().unwrap(); assert_eq!(sample.kind, kind); - assert_eq!(sample.value.to_string(), VALUE); + assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); } sample_kind_integrity_in_publication_with(SampleKind::Put); @@ -1392,7 +1408,7 @@ mod tests { assert_eq!(sample.kind, kind); if let SampleKind::Put = kind { - assert_eq!(sample.value.to_string(), VALUE); + assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index b278bcfa26..6bd78d4fc7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,14 +17,12 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -#[zenoh_macros::unstable] -use 
crate::query::ReplyKeyExpr; -#[zenoh_macros::unstable] -use crate::sample::Attachment; use crate::sample::DataInfo; +use crate::Id; use crate::SessionRef; use crate::Undeclarable; - +#[cfg(feature = "unstable")] +use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -190,8 +188,9 @@ impl SyncResolve for ReplyBuilder<'_> { } let Sample { key_expr, - value: Value { payload, encoding }, + payload, kind, + encoding, timestamp, qos, #[cfg(feature = "unstable")] @@ -251,13 +250,13 @@ impl SyncResolve for ReplyBuilder<'_> { payload: match kind { SampleKind::Put => ReplyBody::Put(Put { timestamp: data_info.timestamp, - encoding: data_info.encoding.unwrap_or_default(), + encoding: data_info.encoding.unwrap_or_default().into(), ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, ext_attachment: ext_attachment!(), ext_unknown: vec![], - payload, + payload: payload.into(), }), SampleKind::Delete => ReplyBody::Del(Del { timestamp, @@ -292,8 +291,8 @@ impl SyncResolve for ReplyBuilder<'_> { ext_body: Some(ValueType { #[cfg(feature = "shared-memory")] ext_shm: None, - payload: payload.payload, - encoding: payload.encoding, + payload: payload.payload.into(), + encoding: payload.encoding.into(), }), code: 0, // TODO }), diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 89b787fef5..543dd62e84 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -13,19 +13,19 @@ // //! 
Sample primitives -use crate::buffers::ZBuf; -use crate::prelude::{KeyExpr, Value, ZenohId}; -use crate::query::Reply; +use crate::encoding::Encoding; +use crate::payload::Payload; +use crate::prelude::{KeyExpr, ZenohId}; use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; +use crate::Value; #[zenoh_macros::unstable] use serde::Serialize; use std::{ convert::{TryFrom, TryInto}, fmt, }; -use zenoh_protocol::core::{CongestionControl, Encoding}; -use zenoh_protocol::network::push::ext::QoSType; +use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; pub type SourceSn = u64; @@ -357,10 +357,12 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; pub struct Sample { /// The key expression on which this Sample was published. pub key_expr: KeyExpr<'static>, - /// The value of this Sample. - pub value: Value, + /// The payload of this Sample. + pub payload: Payload, /// The kind of this Sample. pub kind: SampleKind, + /// The encoding of this sample + pub encoding: Encoding, /// The [`Timestamp`] of this Sample. pub timestamp: Option, /// Quality of service settings this sample was sent with. @@ -390,14 +392,15 @@ pub struct Sample { impl Sample { /// Creates a new Sample. #[inline] - pub fn new(key_expr: IntoKeyExpr, value: IntoValue) -> Self + pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, - IntoValue: Into, + IntoPayload: Into, { Sample { key_expr: key_expr.into(), - value: value.into(), + payload: payload.into(), + encoding: Encoding::default(), kind: SampleKind::default(), timestamp: None, qos: QoS::default(), @@ -409,18 +412,19 @@ impl Sample { } /// Creates a new Sample. 
#[inline] - pub fn try_from( + pub fn try_from( key_expr: TryIntoKeyExpr, - value: IntoValue, + payload: IntoPayload, ) -> Result where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoValue: Into, + IntoPayload: Into, { Ok(Sample { key_expr: key_expr.try_into().map_err(Into::into)?, - value: value.into(), + payload: payload.into(), + encoding: Encoding::default(), kind: SampleKind::default(), timestamp: None, qos: QoS::default(), @@ -433,40 +437,30 @@ impl Sample { /// Creates a new Sample with optional data info. #[inline] - pub(crate) fn with_info( - key_expr: KeyExpr<'static>, - payload: ZBuf, - data_info: Option, - ) -> Self { - let mut value: Value = payload.into(); - if let Some(data_info) = data_info { - if let Some(encoding) = &data_info.encoding { - value.encoding = encoding.clone(); + pub(crate) fn with_info(mut self, mut data_info: Option) -> Self { + if let Some(mut data_info) = data_info.take() { + self.kind = data_info.kind; + if let Some(encoding) = data_info.encoding.take() { + self.encoding = encoding; } - Sample { - key_expr, - value, - kind: data_info.kind, - timestamp: data_info.timestamp, - qos: data_info.qos, - #[cfg(feature = "unstable")] - source_info: data_info.into(), - #[cfg(feature = "unstable")] - attachment: None, - } - } else { - Sample { - key_expr, - value, - kind: SampleKind::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + self.qos = data_info.qos; + self.timestamp = data_info.timestamp; + #[cfg(feature = "unstable")] + { + self.source_info = SourceInfo { + source_id: data_info.source_id, + source_sn: data_info.source_sn, + }; } } + self + } + + /// Sets the encoding of this Sample. + #[inline] + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self } /// Gets the timestamp of this Sample. 
@@ -522,34 +516,9 @@ impl Sample { } } -impl std::ops::Deref for Sample { - type Target = Value; - - fn deref(&self) -> &Self::Target { - &self.value - } -} - -impl std::ops::DerefMut for Sample { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.value - } -} - -impl std::fmt::Display for Sample { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.kind { - SampleKind::Delete => write!(f, "{}({})", self.kind, self.key_expr), - _ => write!(f, "{}({}: {})", self.kind, self.key_expr, self.value), - } - } -} - -impl TryFrom for Sample { - type Error = Value; - - fn try_from(value: Reply) -> Result { - value.sample +impl From for Value { + fn from(sample: Sample) -> Self { + Value::new(sample.payload).with_encoding(sample.encoding) } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 9ab0242f16..87c416c209 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // - use crate::admin; use crate::config::Config; use crate::config::Notifier; +use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; use crate::key_expr::KeyExprInner; @@ -23,6 +23,7 @@ use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; +use crate::payload::Payload; use crate::prelude::Locality; use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; @@ -670,7 +671,7 @@ impl Session { /// # Arguments /// /// * `key_expr` - Key expression matching the resources to put - /// * `value` - The value to put + /// * `payload` - The payload to put /// /// # Examples /// ``` @@ -679,28 +680,29 @@ impl Session { /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session - /// .put("key/expression", "value") - /// .encoding(KnownEncoding::TextPlain) + /// .put("key/expression", "payload") 
+ /// .with_encoding(Encoding::TEXT_PLAIN) /// .res() /// .await /// .unwrap(); /// # }) /// ``` #[inline] - pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoValue>( + pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoPayload>( &'a self, key_expr: TryIntoKeyExpr, - value: IntoValue, + payload: IntoPayload, ) -> PutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoValue: Into, + IntoPayload: Into, { PutBuilder { publisher: self.declare_publisher(key_expr), - value: value.into(), + payload: payload.into(), kind: SampleKind::Put, + encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, } @@ -732,8 +734,9 @@ impl Session { { PutBuilder { publisher: self.declare_publisher(key_expr), - value: Value::empty(), + payload: Payload::empty(), kind: SampleKind::Delete, + encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, } @@ -1669,7 +1672,7 @@ impl Session { let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { #[allow(unused_mut)] - let mut sample = Sample::with_info(key_expr, payload.clone(), info.clone()); + let mut sample = Sample::new(key_expr, payload.clone()).with_info(info.clone()); #[cfg(feature = "unstable")] { sample.attachment = attachment.clone(); @@ -1678,7 +1681,7 @@ impl Session { } if let Some((cb, key_expr)) = last { #[allow(unused_mut)] - let mut sample = Sample::with_info(key_expr, payload, info); + let mut sample = Sample::new(key_expr, payload).with_info(info); #[cfg(feature = "unstable")] { sample.attachment = attachment; @@ -1785,8 +1788,8 @@ impl Session { ); let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); + if destination != Locality::SessionLocal { #[allow(unused_mut)] let mut ext_attachment = None; @@ -1812,8 +1815,8 @@ impl Session { ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone(), - payload: 
v.payload.clone(), + encoding: v.encoding.clone().into(), + payload: v.payload.clone().into(), }), ext_attachment, ext_unknown: vec![], @@ -1831,8 +1834,8 @@ impl Session { value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone(), - payload: v.payload.clone(), + encoding: v.encoding.clone().into(), + payload: v.payload.clone().into(), }), #[cfg(feature = "unstable")] attachment, @@ -1902,8 +1905,8 @@ impl Session { key_expr, parameters, value: body.map(|b| Value { - payload: b.payload, - encoding: b.encoding, + payload: b.payload.into(), + encoding: b.encoding.into(), }), qid, zid, @@ -2188,7 +2191,7 @@ impl Primitives for Session { PushBody::Put(m) => { let info = DataInfo { kind: SampleKind::Put, - encoding: Some(m.encoding), + encoding: Some(m.encoding.into()), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), source_id: m.ext_sinfo.as_ref().map(|i| i.zid), @@ -2260,12 +2263,12 @@ impl Primitives for Session { std::mem::drop(state); let value = match e.ext_body { Some(body) => Value { - payload: body.payload, - encoding: body.encoding, + payload: body.payload.into(), + encoding: body.encoding.into(), }, None => Value { - payload: ZBuf::empty(), - encoding: zenoh_protocol::core::Encoding::EMPTY, + payload: Payload::empty(), + encoding: Encoding::default(), }, }; let replier_id = match e.ext_sinfo { @@ -2360,7 +2363,7 @@ impl Primitives for Session { payload, info: DataInfo { kind: SampleKind::Put, - encoding: Some(encoding), + encoding: Some(encoding.into()), timestamp, qos: QoS::from(msg.ext_qos), source_id: ext_sinfo.as_ref().map(|i| i.zid), @@ -2391,7 +2394,7 @@ impl Primitives for Session { #[allow(unused_mut)] let mut sample = - Sample::with_info(key_expr.into_owned(), payload, Some(info)); + Sample::new(key_expr.into_owned(), payload).with_info(Some(info)); #[cfg(feature = "unstable")] { sample.attachment = attachment; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs 
index e0123ec6b1..c707218017 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,9 +13,11 @@ // //! Subscribing primitives. -use crate::handlers::{locked, Callback, DefaultHandler}; +use crate::handlers::{locked, Callback, DefaultHandler, IntoCallbackReceiverPair}; +use crate::key_expr::KeyExpr; use crate::prelude::Locality; -use crate::prelude::{Id, IntoCallbackReceiverPair, KeyExpr, Sample}; +use crate::sample::Sample; +use crate::Id; use crate::Undeclarable; use crate::{Result as ZResult, SessionRef}; use std::fmt; @@ -62,7 +64,7 @@ impl fmt::Debug for SubscriberState { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload) }) /// .res() /// .await /// .unwrap(); @@ -95,7 +97,7 @@ pub(crate) struct SubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .pull_mode() /// .res() /// .await @@ -118,7 +120,7 @@ impl<'a> PullSubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .pull_mode() /// .res() /// .await @@ -327,7 +329,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// 
.declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .res() /// .await /// .unwrap(); @@ -402,7 +404,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {}", sample.key_expr, sample.value); + /// println!("Received: {} {:?}", sample.key_expr, sample.payload); /// } /// # }) /// ``` @@ -631,7 +633,7 @@ where /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { -/// println!("Received: {} {}", sample.key_expr, sample.value); +/// println!("Received: {} {:?}", sample.key_expr, sample.payload); /// } /// # }) /// ``` diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 849cfd57d5..128f0ff605 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,693 +13,57 @@ // //! Value primitives. +use crate::{encoding::Encoding, payload::Payload}; -use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; -use std::borrow::Cow; -use std::convert::TryFrom; -#[cfg(feature = "shared-memory")] -use std::sync::Arc; - -use zenoh_collections::Properties; -use zenoh_result::ZError; - -use crate::buffers::ZBuf; -use crate::prelude::{Encoding, KnownEncoding, Sample, SplitBuffer}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; - -/// A zenoh Value. +/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - /// The payload of this Value. - pub payload: ZBuf, - /// An encoding description indicating how the associated payload is encoded. + /// The binary [`Payload`] of this [`Value`]. + pub payload: Payload, + /// The [`Encoding`] of this [`Value`]. 
pub encoding: Encoding, } impl Value { - /// Creates a new zenoh Value. - pub fn new(payload: ZBuf) -> Self { + /// Creates a new [`Value`] with default [`Encoding`]. + pub fn new(payload: T) -> Self + where + T: Into, + { Value { - payload, - encoding: KnownEncoding::AppOctetStream.into(), + payload: payload.into(), + encoding: Encoding::default(), } } - /// Creates an empty Value. - pub fn empty() -> Self { + /// Creates an empty [`Value`]. + pub const fn empty() -> Self { Value { - payload: ZBuf::empty(), - encoding: KnownEncoding::AppOctetStream.into(), + payload: Payload::empty(), + encoding: Encoding::default(), } } - /// Sets the encoding of this zenoh Value. + /// Sets the encoding of this [`Value`]`. #[inline(always)] - pub fn encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; + pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self + where + IntoEncoding: Into, + { + self.encoding = encoding.into(); self } } -impl std::fmt::Debug for Value { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "Value{{ payload: {:?}, encoding: {} }}", - self.payload, self.encoding - ) - } -} - -impl std::fmt::Display for Value { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let payload = self.payload.contiguous(); - write!( - f, - "{}", - String::from_utf8(payload.clone().into_owned()) - .unwrap_or_else(|_| b64_std_engine.encode(payload)) - ) - } -} - -impl std::error::Error for Value {} - -// Shared memory conversion -#[cfg(feature = "shared-memory")] -impl From> for Value { - fn from(smb: Arc) -> Self { - Value { - payload: smb.into(), - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -#[cfg(feature = "shared-memory")] -impl From> for Value { - fn from(smb: Box) -> Self { - let smb: Arc = smb.into(); - Self::from(smb) - } -} - -#[cfg(feature = "shared-memory")] -impl From for Value { - fn from(smb: SharedMemoryBuf) -> Self { +impl From for Value +where + T: Into, +{ + fn 
from(t: T) -> Self { Value { - payload: smb.into(), - encoding: KnownEncoding::AppOctetStream.into(), + payload: t.into(), + encoding: Encoding::default(), } } } - -// Bytes conversion -impl From for Value { - fn from(buf: ZBuf) -> Self { - Value { - payload: buf, - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -impl TryFrom<&Value> for ZBuf { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.clone()), - unexpected => Err(zerror!( - "{:?} can not be converted into Cow<'a, [u8]>", - unexpected - )), - } - } -} - -impl TryFrom for ZBuf { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -impl From<&[u8]> for Value { - fn from(buf: &[u8]) -> Self { - Value::from(ZBuf::from(buf.to_vec())) - } -} - -impl<'a> TryFrom<&'a Value> for Cow<'a, [u8]> { - type Error = ZError; - - fn try_from(v: &'a Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.contiguous()), - unexpected => Err(zerror!( - "{:?} can not be converted into Cow<'a, [u8]>", - unexpected - )), - } - } -} - -impl From> for Value { - fn from(buf: Vec) -> Self { - Value::from(ZBuf::from(buf)) - } -} - -impl TryFrom<&Value> for Vec { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.contiguous().to_vec()), - unexpected => Err(zerror!( - "{:?} can not be converted into Vec", - unexpected - )), - } - } -} - -impl TryFrom for Vec { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// String conversion -impl From for Value { - fn from(s: String) -> Self { - Value { - payload: ZBuf::from(s.into_bytes()), - encoding: KnownEncoding::TextPlain.into(), - } - } -} - -impl From<&str> for Value { - fn from(s: &str) -> Self { - Value { - payload: ZBuf::from(Vec::::from(s)), - encoding: 
KnownEncoding::TextPlain.into(), - } - } -} - -impl TryFrom<&Value> for String { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::TextPlain => { - String::from_utf8(v.payload.contiguous().to_vec()).map_err(|e| zerror!("{}", e)) - } - unexpected => Err(zerror!("{:?} can not be converted into String", unexpected)), - } - } -} - -impl TryFrom for String { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// Sample conversion -impl From for Value { - fn from(s: Sample) -> Self { - s.value - } -} - -// i64 conversion -impl From for Value { - fn from(i: i64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i64", unexpected)), - } - } -} - -impl TryFrom for i64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i32 conversion -impl From for Value { - fn from(i: i32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i32", unexpected)), - } - } -} - -impl TryFrom for i32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i16 conversion -impl From for Value { - fn from(i: i16) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i16 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i16", unexpected)), - } - } -} - -impl TryFrom for i16 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i8 conversion -impl From for Value { - fn from(i: i8) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i8 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i8", unexpected)), - } - } -} - -impl TryFrom for i8 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// isize conversion -impl From for Value { - fn from(i: isize) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for isize { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into isize", unexpected)), - } - } -} - -impl TryFrom for isize { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u64 conversion -impl From for Value { - fn from(i: u64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u64", unexpected)), - } - } -} - -impl TryFrom for u64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u32 conversion -impl From for Value { - fn from(i: u32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u32", unexpected)), - } - } -} - -impl TryFrom for u32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u16 conversion -impl From for Value { - fn from(i: u16) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u16 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u16", unexpected)), - } - } -} - -impl TryFrom for u16 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u8 conversion -impl From for Value { - fn from(i: u8) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u8 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u8", unexpected)), - } - } -} - -impl TryFrom for u8 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// usize conversion -impl From for Value { - fn from(i: usize) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for usize { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into usize", unexpected)), - } - } -} - -impl TryFrom for usize { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// f64 conversion -impl From for Value { - fn from(f: f64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(f.to_string())), - encoding: KnownEncoding::AppFloat.into(), - } - } -} - -impl TryFrom<&Value> for f64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into f64", unexpected)), - } - } -} - -impl TryFrom for f64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// f32 conversion -impl From for Value { - fn from(f: f32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(f.to_string())), - encoding: KnownEncoding::AppFloat.into(), - } - } -} - -impl TryFrom<&Value> for f32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into f32", unexpected)), - } - } -} - -impl TryFrom for f32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// JSON conversion -impl From<&serde_json::Value> for Value { - fn from(json: &serde_json::Value) -> Self { - Value { - payload: ZBuf::from(Vec::::from(json.to_string())), - encoding: KnownEncoding::AppJson.into(), - } - } -} - -impl From for Value { - fn from(json: serde_json::Value) -> Self { - Value::from(&json) - } -} - -impl TryFrom<&Value> for serde_json::Value { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppJson | KnownEncoding::TextJson => { - let r = serde::Deserialize::deserialize(&mut serde_json::Deserializer::from_slice( - &v.payload.contiguous(), - )); - r.map_err(|e| zerror!("{}", e)) - } - unexpected => Err(zerror!( - "{:?} can not be converted into Properties", - unexpected - )), - } - } -} - -impl TryFrom for serde_json::Value { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// Properties conversion -impl From for Value { - fn from(p: Properties) -> Self { - Value { - payload: ZBuf::from(Vec::::from(p.to_string())), - encoding: KnownEncoding::AppProperties.into(), - } - } -} - -impl TryFrom<&Value> for Properties { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match *v.encoding.prefix() { - KnownEncoding::AppProperties => Ok(Properties::from( - std::str::from_utf8(&v.payload.contiguous()).map_err(|e| zerror!("{}", e))?, - )), - unexpected => Err(zerror!( - "{:?} can not be converted into Properties", - unexpected - )), - } - } -} - -impl TryFrom for Properties { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index d1fbd1086a..89dd3e231f 100644 --- 
a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -75,7 +75,7 @@ fn queries() { query .reply(Ok(Sample::new( query.key_expr().clone(), - query.value().unwrap().clone(), + query.value().unwrap().payload.clone(), ) .with_attachment(attachment))) .res() diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 3b10f12f03..5c96f080f8 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -58,7 +58,7 @@ impl Task { let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; let mut counter = 0; while let Ok(sample) = sub.recv_async().await { - let recv_size = sample.value.payload.len(); + let recv_size = sample.payload.len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -75,7 +75,7 @@ impl Task { let value: Value = vec![0u8; *payload_size].into(); while remaining_checkpoints.load(Ordering::Relaxed) > 0 { ztimeout!(session - .put(ke, value.clone()) + .put(ke, value.payload.clone()) .congestion_control(CongestionControl::Block) .res_async())?; } @@ -91,7 +91,7 @@ impl Task { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - let recv_size = sample.value.payload.len(); + let recv_size = sample.payload.len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -99,7 +99,7 @@ impl Task { Err(err) => { log::warn!( - "Sample got from {} failed to unwrap! Error: {}.", + "Sample got from {} failed to unwrap! 
Error: {:?}.", ke, err ); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index f727ad60c3..8a3f4381d2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -95,7 +95,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re let sub = ztimeout!(peer01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); + assert_eq!(sample.payload.len(), size); c_msgs.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -194,7 +194,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); assert_eq!(s.kind, SampleKind::Put); - assert_eq!(s.value.payload.len(), size); + assert_eq!(s.payload.len(), size); cnt += 1; } } @@ -212,7 +212,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); assert_eq!(s.kind, SampleKind::Delete); - assert_eq!(s.value.payload.len(), 0); + assert_eq!(s.payload.len(), 0); cnt += 1; } } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index b986c92e8f..76910ee5de 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -114,7 +114,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub1 = ztimeout!(s01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); + assert_eq!(sample.payload.len(), size); c_msgs1.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -126,7 +126,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub2 = ztimeout!(s02 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); + assert_eq!(sample.payload.len(), size); c_msgs2.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -224,7 +224,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: 
&Session, s03: &Session) { for _ in 0..msg_count { let rs = ztimeout!(s03.get(key_expr).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().value.payload.len(), size); + assert_eq!(s.sample.unwrap().payload.len(), size); cnt += 1; } } From b11a20e11f3c2c7c66644417cb0ed08b852cb88a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Mar 2024 09:42:32 +0100 Subject: [PATCH 007/598] Fix protocol comment --- commons/zenoh-protocol/src/zenoh/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 4c8458885b..1284116888 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -214,7 +214,7 @@ pub mod ext { /// +-+-+-+-+-+-+-+-+ /// ~ encoding ~ /// +---------------+ - /// ~ pl: ~ -- Payload + /// ~ pl: [u8;z32] ~ -- Payload /// +---------------+ /// ``` #[derive(Debug, Clone, PartialEq, Eq)] From fcbceb07ae9bd4160a294ab5c982b6882eca6a7b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Mar 2024 12:43:21 +0100 Subject: [PATCH 008/598] Improve Endpoint and Locator doc --- commons/zenoh-protocol/src/core/endpoint.rs | 7 ++++++- commons/zenoh-protocol/src/core/locator.rs | 6 +++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 5e921345e4..a8fcb3ae98 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -497,7 +497,12 @@ impl fmt::Debug for ConfigMut<'_> { } } -/// A `String` that respects the [`EndPoint`] canon form: `#`, such that `` is a valid [`Locator`] `` is of the form `=;...;=` where keys are alphabetically sorted. +/// A string that respects the [`EndPoint`] canon form: `[#]`. +/// +/// `` is a valid [`Locator`] and `` is of the form `=;...;=` where keys are alphabetically sorted. 
+/// `` is optional and can be provided to configure some aspectes for an [`EndPoint`], e.g. the interface to listen on or connect to. +/// +/// A full [`EndPoint`] string is hence in the form of `/
[?][#config]`. #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[serde(into = "String")] #[serde(try_from = "String")] diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index 42379f2b65..50b909b12f 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ b/commons/zenoh-protocol/src/core/locator.rs @@ -16,9 +16,9 @@ use alloc::{borrow::ToOwned, string::String}; use core::{convert::TryFrom, fmt, hash::Hash, str::FromStr}; use zenoh_result::{Error as ZError, ZResult}; -// Locator -/// A `String` that respects the [`Locator`] canon form: `/
[?]`, -/// such that `` is of the form `=;...;=` where keys are alphabetically sorted. +/// A string that respects the [`Locator`] canon form: `/
[?]`. +/// +/// `` is of the form `=;...;=` where keys are alphabetically sorted. #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[serde(into = "String")] #[serde(try_from = "String")] From 41e25579f9f3c851f44c992946fd1d0c61fccfa9 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 12 Mar 2024 13:42:18 +0100 Subject: [PATCH 009/598] Protocol changes: EntityId (into protocol_changes) (#774) * New Subscribers EntityId behavior for clients and peers * Improve routing logging * New Queryables EntityId behavior for clients and peers * Improve routing logging * Use proper QueryableId in Session and AdminSpace * Sessions use runtime Id generator to avoid collisions * AdminSpace use runtime Id generator to avoid collisions * Use proper ResponderId * Define EntityId type * Add source_eid to SourceInfo * Update source_info_stack_size test * Update source_info_stack_size test * Introduce EntityGlobalId type * Add id() function to Subscriber, Queryable and Publisher * Add Publication::with_source_info() function * Code format * Remove ref to PR #703 * Fix doctests * Add comments * Remove comments --- commons/zenoh-codec/src/network/declare.rs | 13 +- commons/zenoh-codec/src/network/mod.rs | 22 +- commons/zenoh-codec/src/zenoh/mod.rs | 26 +- commons/zenoh-protocol/src/core/mod.rs | 21 + commons/zenoh-protocol/src/core/wire_expr.rs | 4 + commons/zenoh-protocol/src/network/declare.rs | 17 +- commons/zenoh-protocol/src/network/mod.rs | 10 +- .../zenoh-protocol/src/network/response.rs | 2 +- commons/zenoh-protocol/src/zenoh/mod.rs | 10 +- zenoh/src/lib.rs | 2 +- zenoh/src/net/routing/dispatcher/face.rs | 6 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 115 +++-- zenoh/src/net/routing/dispatcher/queries.rs | 103 ++-- zenoh/src/net/routing/dispatcher/resource.rs | 14 +- zenoh/src/net/routing/hat/client/mod.rs | 26 +- zenoh/src/net/routing/hat/client/pubsub.rs | 107 ++-- zenoh/src/net/routing/hat/client/queries.rs | 113 +++-- 
.../src/net/routing/hat/linkstate_peer/mod.rs | 30 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 136 ++--- .../net/routing/hat/linkstate_peer/queries.rs | 142 +++--- zenoh/src/net/routing/hat/mod.rs | 17 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 29 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 106 ++-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 113 +++-- zenoh/src/net/routing/hat/router/mod.rs | 31 +- zenoh/src/net/routing/hat/router/pubsub.rs | 213 ++++---- zenoh/src/net/routing/hat/router/queries.rs | 220 +++++---- zenoh/src/net/runtime/adminspace.rs | 8 +- zenoh/src/net/runtime/mod.rs | 8 + zenoh/src/net/tests/tables.rs | 88 +++- zenoh/src/prelude.rs | 5 + zenoh/src/publication.rs | 83 +++- zenoh/src/queryable.rs | 73 +-- zenoh/src/sample.rs | 20 +- zenoh/src/session.rs | 467 +++++++----------- zenoh/src/subscriber.rs | 26 + 36 files changed, 1349 insertions(+), 1077 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 6df25a8d2a..bcc55ed62b 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -441,14 +441,19 @@ where let subscriber::UndeclareSubscriber { id, ext_wire_expr } = x; // Header - let header = declare::id::U_SUBSCRIBER | subscriber::flag::Z; + let mut header = declare::id::U_SUBSCRIBER; + if !ext_wire_expr.is_null() { + header |= subscriber::flag::Z; + } self.write(&mut *writer, header)?; // Body self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (ext_wire_expr, false))?; + if !ext_wire_expr.is_null() { + self.write(&mut *writer, (ext_wire_expr, false))?; + } Ok(()) } @@ -483,7 +488,6 @@ where let id: subscriber::SubscriberId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); @@ -665,7 +669,6 
@@ where let id: queryable::QueryableId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); @@ -813,7 +816,6 @@ where let id: token::TokenId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); @@ -1032,7 +1034,6 @@ where let id: interest::InterestId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index dade13d362..3a227cd42a 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -26,8 +26,8 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{imsg, ZExtZ64, ZExtZBufHeader}, - core::{Reliability, ZenohId}, - network::{ext::EntityIdType, *}, + core::{EntityId, Reliability, ZenohId}, + network::{ext::EntityGlobalIdType, *}, }; // NetworkMessage @@ -218,21 +218,21 @@ where } // Extension: EntityId -impl LCodec<&ext::EntityIdType<{ ID }>> for Zenoh080 { - fn w_len(self, x: &ext::EntityIdType<{ ID }>) -> usize { - let EntityIdType { zid, eid } = x; +impl LCodec<&ext::EntityGlobalIdType<{ ID }>> for Zenoh080 { + fn w_len(self, x: &ext::EntityGlobalIdType<{ ID }>) -> usize { + let EntityGlobalIdType { zid, eid } = x; 1 + self.w_len(zid) + self.w_len(*eid) } } -impl WCodec<(&ext::EntityIdType<{ ID }>, bool), &mut W> for Zenoh080 +impl WCodec<(&ext::EntityGlobalIdType<{ ID }>, bool), &mut W> for Zenoh080 where W: Writer, { type 
Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (&ext::EntityIdType<{ ID }>, bool)) -> Self::Output { + fn write(self, writer: &mut W, x: (&ext::EntityGlobalIdType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; @@ -248,13 +248,13 @@ where } } -impl RCodec<(ext::EntityIdType<{ ID }>, bool), &mut R> for Zenoh080Header +impl RCodec<(ext::EntityGlobalIdType<{ ID }>, bool), &mut R> for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::EntityIdType<{ ID }>, bool), Self::Error> { + fn read(self, reader: &mut R) -> Result<(ext::EntityGlobalIdType<{ ID }>, bool), Self::Error> { let (_, more): (ZExtZBufHeader<{ ID }>, bool) = self.read(&mut *reader)?; let flags: u8 = self.codec.read(&mut *reader)?; @@ -263,8 +263,8 @@ where let lodec = Zenoh080Length::new(length); let zid: ZenohId = lodec.read(&mut *reader)?; - let eid: u32 = self.codec.read(&mut *reader)?; + let eid: EntityId = self.codec.read(&mut *reader)?; - Ok((ext::EntityIdType { zid, eid }, more)) + Ok((ext::EntityGlobalIdType { zid, eid }, more)) } } diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index fdff09be94..0d7146dc90 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -32,7 +32,7 @@ use zenoh_buffers::{ use zenoh_protocol::common::{iext, ZExtUnit}; use zenoh_protocol::{ common::{imsg, ZExtZBufHeader}, - core::{Encoding, ZenohId}, + core::{Encoding, EntityGlobalId, EntityId, ZenohId}, zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; @@ -150,9 +150,9 @@ where // Extension: SourceInfo impl LCodec<&ext::SourceInfoType<{ ID }>> for Zenoh080 { fn w_len(self, x: &ext::SourceInfoType<{ ID }>) -> usize { - let ext::SourceInfoType { zid, eid, sn } = x; + let ext::SourceInfoType { id, sn } = x; - 1 + self.w_len(zid) + 
self.w_len(*eid) + self.w_len(*sn) + 1 + self.w_len(&id.zid) + self.w_len(id.eid) + self.w_len(*sn) } } @@ -164,18 +164,18 @@ where fn write(self, writer: &mut W, x: (&ext::SourceInfoType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; - let ext::SourceInfoType { zid, eid, sn } = x; + let ext::SourceInfoType { id, sn } = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; - let flags: u8 = (zid.size() as u8 - 1) << 4; + let flags: u8 = (id.zid.size() as u8 - 1) << 4; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(zid.size()); - lodec.write(&mut *writer, zid)?; + let lodec = Zenoh080Length::new(id.zid.size()); + lodec.write(&mut *writer, &id.zid)?; - self.write(&mut *writer, eid)?; + self.write(&mut *writer, id.eid)?; self.write(&mut *writer, sn)?; Ok(()) } @@ -196,10 +196,16 @@ where let lodec = Zenoh080Length::new(length); let zid: ZenohId = lodec.read(&mut *reader)?; - let eid: u32 = self.codec.read(&mut *reader)?; + let eid: EntityId = self.codec.read(&mut *reader)?; let sn: u32 = self.codec.read(&mut *reader)?; - Ok((ext::SourceInfoType { zid, eid, sn }, more)) + Ok(( + ext::SourceInfoType { + id: EntityGlobalId { zid, eid }, + sn, + }, + more, + )) } } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 82658db2fd..20fcf85dd9 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -261,6 +261,27 @@ impl<'de> serde::Deserialize<'de> for ZenohId { } } +/// The unique id of a zenoh entity inside it's parent [`Session`]. +pub type EntityId = u32; + +/// The global unique id of a zenoh entity. 
+#[derive(Debug, Default, Clone, Eq, Hash, PartialEq)] +pub struct EntityGlobalId { + pub zid: ZenohId, + pub eid: EntityId, +} + +impl EntityGlobalId { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + Self { + zid: ZenohId::rand(), + eid: rand::thread_rng().gen(), + } + } +} + #[repr(u8)] #[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] pub enum Priority { diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index 6d9623d6ca..a66b1aa212 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -71,6 +71,10 @@ impl<'a> WireExpr<'a> { } } + pub fn is_empty(&self) -> bool { + self.scope == 0 && self.suffix.as_ref().is_empty() + } + pub fn as_str(&'a self) -> &'a str { if self.scope == 0 { self.suffix.as_ref() diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 8164d9440d..2dd8de4ef8 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -177,7 +177,6 @@ pub mod common { pub mod ext { use super::*; - // WARNING: this is a temporary and mandatory extension used for undeclarations pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { @@ -195,6 +194,10 @@ pub mod common { } } + pub fn is_null(&self) -> bool { + self.wire_expr.is_empty() + } + #[cfg(feature = "test")] pub fn rand() -> Self { Self { @@ -286,9 +289,11 @@ pub mod keyexpr { } pub mod subscriber { + use crate::core::EntityId; + use super::*; - pub type SubscriberId = u32; + pub type SubscriberId = EntityId; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -441,7 +446,6 @@ pub mod subscriber { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareSubscriber { pub id: SubscriberId, - // WARNING: this is a temporary and mandatory extension used 
for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -460,9 +464,11 @@ pub mod subscriber { } pub mod queryable { + use crate::core::EntityId; + use super::*; - pub type QueryableId = u32; + pub type QueryableId = EntityId; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -597,7 +603,6 @@ pub mod queryable { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareQueryable { pub id: QueryableId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -683,7 +688,6 @@ pub mod token { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareToken { pub id: TokenId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -1097,7 +1101,6 @@ pub mod interest { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareInterest { pub id: InterestId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index bb76cb8946..6af7fef243 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -200,7 +200,7 @@ impl From for NetworkMessage { pub mod ext { use crate::{ common::{imsg, ZExtZ64}, - core::{CongestionControl, Priority, ZenohId}, + core::{CongestionControl, EntityId, Priority, ZenohId}, }; use core::fmt; @@ -407,19 +407,19 @@ pub mod ext { /// % eid % /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] - pub struct EntityIdType { + pub struct EntityGlobalIdType { pub zid: ZenohId, - pub eid: u32, + pub eid: EntityId, } - impl EntityIdType<{ ID }> { + impl EntityGlobalIdType<{ ID }> { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); let zid = ZenohId::rand(); - let eid: 
u32 = rng.gen(); + let eid: EntityId = rng.gen(); Self { zid, eid } } } diff --git a/commons/zenoh-protocol/src/network/response.rs b/commons/zenoh-protocol/src/network/response.rs index 9ef2c26a10..6f0925429b 100644 --- a/commons/zenoh-protocol/src/network/response.rs +++ b/commons/zenoh-protocol/src/network/response.rs @@ -67,7 +67,7 @@ pub mod ext { pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>; pub type ResponderId = zextzbuf!(0x3, false); - pub type ResponderIdType = crate::network::ext::EntityIdType<{ ResponderId::ID }>; + pub type ResponderIdType = crate::network::ext::EntityGlobalIdType<{ ResponderId::ID }>; } impl Response { diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 1284116888..3e5d573c43 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -158,7 +158,7 @@ impl From for ResponseBody { pub mod ext { use zenoh_buffers::ZBuf; - use crate::core::{Encoding, ZenohId}; + use crate::core::{Encoding, EntityGlobalId}; /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ @@ -172,8 +172,7 @@ pub mod ext { /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { - pub zid: ZenohId, - pub eid: u32, + pub id: EntityGlobalId, pub sn: u32, } @@ -183,10 +182,9 @@ pub mod ext { use rand::Rng; let mut rng = rand::thread_rng(); - let zid = ZenohId::rand(); - let eid: u32 = rng.gen(); + let id = EntityGlobalId::rand(); let sn: u32 = rng.gen(); - Self { zid, eid, sn } + Self { id, sn } } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index bae81d3a54..eb1ba1bcd1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -79,7 +79,7 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; -pub(crate) type Id = usize; +pub(crate) type Id = u32; use git_version::git_version; use handlers::DefaultHandler; diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 
6ef5c063d0..79c9da9127 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -171,6 +171,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, @@ -181,6 +182,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, ); @@ -190,6 +192,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, @@ -200,6 +203,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, ); @@ -244,7 +248,7 @@ impl Primitives for Face { pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); } _ => { - log::error!("Unsupported request"); + log::error!("{} Unsupported request!", self); } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index d6497a80b3..c0d1bb4a34 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -22,7 +22,7 @@ use std::sync::RwLock; use zenoh_core::zread; use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::Mode; +use zenoh_protocol::network::declare::{Mode, SubscriberId}; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::{declare::ext, Push}, @@ -34,17 +34,24 @@ pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: SubscriberId, expr: &WireExpr, sub_info: &SubscriberInfo, node_id: NodeId, ) { - log::debug!("Declare subscription {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, &expr.scope, expr.mapping) .cloned() 
{ Some(mut prefix) => { + log::debug!( + "{} Declare subscriber {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); let res = Resource::get_resource(&prefix, &expr.suffix); let (mut res, mut wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { @@ -66,7 +73,7 @@ pub(crate) fn declare_subscription( (res, wtables) }; - hat_code.declare_subscription(&mut wtables, face, &mut res, sub_info, node_id); + hat_code.declare_subscription(&mut wtables, face, id, &mut res, sub_info, node_id); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -86,7 +93,12 @@ pub(crate) fn declare_subscription( } drop(wtables); } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + None => log::error!( + "{} Declare subscriber {} for unknown scope {}!", + face, + id, + expr.scope + ), } } @@ -94,41 +106,60 @@ pub(crate) fn undeclare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: SubscriberId, expr: &WireExpr, node_id: NodeId, ) { - log::debug!("Undeclare subscription {}", face); - let rtables = zread!(tables.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - - hat_code.undeclare_subscription(&mut wtables, face, &mut res, node_id); - - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); + let res = if expr.is_empty() { + None + } else { + let rtables = 
zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(res) => Some(res), + None => { + log::error!( + "{} Undeclare unknown subscriber {}{}!", + face, + prefix.expr(), + expr.suffix + ); + return; } - Resource::clean(&mut res); - drop(wtables); + }, + None => { + log::error!( + "{} Undeclare subscriber with unknown scope {}", + face, + expr.scope + ); + return; } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), + } + }; + let mut wtables = zwrite!(tables.tables); + if let Some(mut res) = hat_code.undeclare_subscription(&mut wtables, face, id, res, node_id) { + log::debug!("{} Undeclare subscriber {} ({})", face, id, res.expr()); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes, matching_pulls) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); + } + Resource::clean(&mut res); + drop(wtables); + } else { + log::error!("{} Undeclare unknown subscriber {}", face, id); } } @@ -445,7 +476,8 @@ pub fn full_reentrant_route_data( match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { Some(prefix) => { log::trace!( - "Route data for res {}{}", + "{} Route data for res {}{}", + face, prefix.expr(), expr.suffix.as_ref() ); @@ -561,7 +593,7 @@ pub fn full_reentrant_route_data( } } None => { - log::error!("Route data with unknown scope {}!", expr.scope); + log::error!("{} Route data with unknown scope {}!", face, expr.scope); } } } @@ -602,14 +634,16 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, 
expr: WireE } None => { log::error!( - "Pull data for unknown subscription {} (no info)!", + "{} Pull data for unknown subscriber {} (no info)!", + face, prefix.expr() + expr.suffix.as_ref() ); } }, None => { log::error!( - "Pull data for unknown subscription {} (no context)!", + "{} Pull data for unknown subscriber {} (no context)!", + face, prefix.expr() + expr.suffix.as_ref() ); } @@ -617,13 +651,14 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE } None => { log::error!( - "Pull data for unknown subscription {} (no resource)!", + "{} Pull data for unknown subscriber {} (no resource)!", + face, prefix.expr() + expr.suffix.as_ref() ); } }, None => { - log::error!("Pull data with unknown scope {}!", expr.scope); + log::error!("{} Pull data with unknown scope {}!", face, expr.scope); } }; } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index b0f7f7f7ef..287621151a 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -21,16 +21,14 @@ use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; -use zenoh_protocol::zenoh::reply::ReplyBody; -use zenoh_protocol::zenoh::Put; use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ - declare::{ext, queryable::ext::QueryableInfo}, + declare::{ext, queryable::ext::QueryableInfo, QueryableId}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, - zenoh::{query::Consolidation, Reply, RequestBody, ResponseBody}, + zenoh::{query::Consolidation, reply::ReplyBody, Put, Reply, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; @@ -44,17 +42,24 @@ pub(crate) fn declare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: QueryableId, expr: &WireExpr, qabl_info: &QueryableInfo, node_id: 
NodeId, ) { - log::debug!("Register queryable {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, &expr.scope, expr.mapping) .cloned() { Some(mut prefix) => { + log::debug!( + "{} Declare queryable {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); let res = Resource::get_resource(&prefix, &expr.suffix); let (mut res, mut wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { @@ -76,7 +81,7 @@ pub(crate) fn declare_queryable( (res, wtables) }; - hat_code.declare_queryable(&mut wtables, face, &mut res, qabl_info, node_id); + hat_code.declare_queryable(&mut wtables, face, id, &mut res, qabl_info, node_id); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -93,7 +98,12 @@ pub(crate) fn declare_queryable( } drop(wtables); } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + None => log::error!( + "{} Declare queryable {} for unknown scope {}!", + face, + id, + expr.scope + ), } } @@ -101,37 +111,57 @@ pub(crate) fn undeclare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: QueryableId, expr: &WireExpr, node_id: NodeId, ) { - let rtables = zread!(tables.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - - hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id); - - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); + let res = if expr.is_empty() { + None + } else { + let rtables = 
zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(res) => Some(res), + None => { + log::error!( + "{} Undeclare unknown queryable {}{}!", + face, + prefix.expr(), + expr.suffix + ); + return; } - Resource::clean(&mut res); - drop(wtables); + }, + None => { + log::error!( + "{} Undeclare queryable with unknown scope {}", + face, + expr.scope + ); + return; } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), + } + }; + let mut wtables = zwrite!(tables.tables); + if let Some(mut res) = hat_code.undeclare_queryable(&mut wtables, face, id, res, node_id) { + log::debug!("{} Undeclare queryable {} ({})", face, id, res.expr()); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } else { + log::error!("{} Undeclare unknown queryable {}", face, id); } } @@ -586,7 +616,7 @@ pub fn route_query( ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid, - eid: 0, // @TODO use proper ResponderId (#703) + eid: 0, // 0 is reserved for routing core }), }, expr.full_expr().to_string(), @@ -701,8 +731,9 @@ pub fn route_query( } None => { log::error!( - "Route query with unknown scope {}! Send final reply.", - expr.scope + "{} Route query with unknown scope {}! 
Send final reply.", + face, + expr.scope, ); drop(rtables); face.primitives diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 813d72a661..9f43841025 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -667,7 +667,11 @@ pub fn register_expr( let mut fullexpr = prefix.expr(); fullexpr.push_str(expr.suffix.as_ref()); if res.expr() != fullexpr { - log::error!("Resource {} remapped. Remapping unsupported!", expr_id); + log::error!( + "{} Resource {} remapped. Remapping unsupported!", + face, + expr_id + ); } } None => { @@ -718,7 +722,11 @@ pub fn register_expr( drop(wtables); } }, - None => log::error!("Declare resource with unknown scope {}!", expr.scope), + None => log::error!( + "{} Declare resource with unknown scope {}!", + face, + expr.scope + ), } } @@ -726,7 +734,7 @@ pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: let wtables = zwrite!(tables.tables); match get_mut_unchecked(face).remote_mappings.remove(&expr_id) { Some(mut res) => Resource::clean(&mut res), - None => log::error!("Undeclare unknown resource!"), + None => log::error!("{} Undeclare unknown resource!", face), } drop(wtables); } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index aa83c34f5d..05210bcaee 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -40,11 +40,11 @@ use super::{ }; use std::{ any::Any, - collections::{HashMap, HashSet}, - sync::Arc, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_protocol::network::declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}; use zenoh_protocol::network::Oam; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -131,7 +131,7 @@ impl HatBaseTrait for HatCode { 
face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -159,7 +159,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -290,19 +290,21 @@ impl HatContext { } struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 828915018d..f9f827ecc5 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -22,8 +22,10 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ @@ -43,10 +45,11 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == 
WhatAmI::Client) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -54,7 +57,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -83,13 +86,13 @@ fn propagate_simple_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -118,16 +121,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; @@ -144,7 +148,7 @@ fn declare_client_subscription( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // @TODO use proper SubscriberId wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -170,21 +174,19 @@ fn client_subs(res: &Arc) -> Vec> { fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { 
- if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -194,45 +196,48 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + if client_subs.is_empty() { + 
propagate_forget_simple_subscription(tables, res); + } + if client_subs.len() == 1 { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } + fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -246,7 +251,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } } @@ -257,27 +262,29 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - declare_client_subscription(tables, face, res, sub_info); + declare_client_subscription(tables, face, id, res, sub_info); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_subscription(tables, face, res); + ) -> Option> { + forget_client_subscription(tables, face, id) } fn get_subscriptions(&self, tables: &Tables) -> Vec> { let mut subs = HashSet::new(); for src_face in 
tables.faces.values() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { subs.insert(sub.clone()); } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index c6dfc34eac..4964a8880a 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -23,10 +23,12 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ @@ -83,16 +85,19 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && (src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -100,7 +105,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: 
DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -114,13 +119,13 @@ fn propagate_simple_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -135,16 +140,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); + register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); } @@ -164,22 +170,19 @@ fn client_qabls(res: &Arc) -> Vec> { fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -189,38 +192,37 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister 
client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(face).local_qabls.remove(res); + let mut client_qabls = client_qabls(res); + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + if client_qabls.len() == 1 { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -228,9 +230,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - 
undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -240,7 +247,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.values() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -255,27 +262,29 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, ) { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_queryable(tables, face, res); + ) -> Option> { + forget_client_queryable(tables, face, id) } fn get_queryables(&self, tables: &Tables) -> Vec> { let mut qabls = HashSet::new(); for src_face in tables.faces.values() { - for qabl in &face_hat!(src_face).remote_qabls { + for qabl in face_hat!(src_face).remote_qabls.values() { qabls.insert(qabl.clone()); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index a655d2f0a3..5591ea3b3e 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -47,12 +47,16 @@ use async_std::task::JoinHandle; use std::{ any::Any, collections::{HashMap, HashSet}, - sync::Arc, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - 
network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, + network::{ + declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + oam::id::OAM_LINKSTATE, + Oam, + }, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -126,7 +130,6 @@ impl HatTables { } fn schedule_compute_trees(&mut self, tables_ref: Arc) { - log::trace!("Schedule computations"); if self.peers_trees_task.is_none() { let task = Some(async_std::task::spawn(async move { async_std::task::sleep(std::time::Duration::from_millis( @@ -142,7 +145,6 @@ impl HatTables { pubsub::pubsub_tree_change(&mut tables, &new_childs); queries::queries_tree_change(&mut tables, &new_childs); - log::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; })); self.peers_trees_task = task; @@ -248,7 +250,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -276,7 +278,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_, mut res) in face .hat .downcast_mut::() .unwrap() @@ -471,20 +473,22 @@ impl HatContext { struct HatFace { link_id: usize, - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs 
index c364f7359f..9a41915333 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -25,8 +25,10 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ @@ -53,8 +55,6 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -63,7 +63,7 @@ fn send_sourced_subscription_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id: 0, // Sourced subscriptions do not use ids wire_expr: key_expr, ext_info: *sub_info, }), @@ -87,10 +87,11 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && dst_face.whatami == WhatAmI::Client { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -98,7 +99,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id, wire_expr: 
key_expr, ext_info: *sub_info, }), @@ -171,7 +172,6 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -199,13 +199,13 @@ fn declare_peer_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -234,16 +234,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = tables.zid; @@ -289,8 +290,6 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -299,7 +298,7 @@ fn send_forget_sourced_subscription_to_net_childs( node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO + id: 0, // Sourced subscriptions do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -315,21 +314,19 @@ fn 
send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -370,11 +367,6 @@ fn propagate_forget_sourced_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -414,37 +406,34 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - if client_subs.len() == 1 && !peer_subs { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - 
ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + let peer_subs = remote_peer_subs(tables, res); + if client_subs.is_empty() { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } + if client_subs.len() == 1 && !peer_subs { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } @@ -452,20 +441,26 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, }; if face.whatami == WhatAmI::Client { for sub in &hat!(tables).peer_subs { - 
face_hat_mut!(face).local_subs.insert(sub.clone()); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -473,7 +468,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -515,7 +510,7 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -585,7 +581,7 @@ impl HatPubSubTrait for HatCode { declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, face, res, sub_info) + declare_client_subscription(tables, face, id, res, sub_info) } } @@ -593,15 +589,23 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer); + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_subscription(tables, face, res); + forget_client_subscription(tables, face, id) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 4192f87e55..51aac2175a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -26,10 +26,12 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use 
std::borrow::Cow; use std::collections::HashMap; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ @@ -133,8 +135,6 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -143,7 +143,7 @@ fn send_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids wire_expr: key_expr, ext_info: *qabl_info, }), @@ -166,14 +166,17 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -181,7 +184,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: 
ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -240,7 +243,6 @@ fn register_peer_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -269,13 +271,13 @@ fn declare_peer_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -290,17 +292,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); - + register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; register_peer_queryable(tables, Some(face), res, &local_details, zid); @@ -345,8 +347,6 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -355,7 +355,7 @@ fn send_forget_sourced_queryable_to_net_childs( node_id: routing_context, }, body: 
DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -371,22 +371,19 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -427,7 +424,6 @@ fn propagate_forget_sourced_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -467,42 +463,41 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut client_qabls = client_qabls(res); + let peer_qabls = 
remote_peer_qabls(tables, res); - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } - - if client_qabls.len() == 1 && !peer_qabls { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); + } - face_hat_mut!(face).local_qabls.remove(res); + if client_qabls.len() == 1 && !peer_qabls { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -510,9 +505,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: 
&mut Tables, face: &mut Arc) { @@ -520,7 +520,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { for qabl in &hat!(tables).peer_qabls { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -528,7 +531,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -641,6 +644,7 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, @@ -650,7 +654,7 @@ impl HatQueriesTrait for HatCode { declare_peer_queryable(tables, face, res, qabl_info, peer); } } else { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info); } } @@ -658,15 +662,23 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer); + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_queryable(tables, face, res); + forget_client_queryable(tables, face, id) } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 
4fbf9c9e5d..d9feb687f2 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -31,7 +31,10 @@ use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ core::WireExpr, network::{ - declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + declare::{ + queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, QueryableId, + SubscriberId, + }, Oam, }, }; @@ -117,6 +120,7 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -125,9 +129,10 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, - ); + ) -> Option>; fn get_subscriptions(&self, tables: &Tables) -> Vec>; @@ -147,6 +152,7 @@ pub(crate) trait HatQueriesTrait { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, @@ -155,9 +161,10 @@ pub(crate) trait HatQueriesTrait { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, - ); + ) -> Option>; fn get_queryables(&self, tables: &Tables) -> Vec>; diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 8dc4f15ada..1a6c1ba407 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -45,11 +45,14 @@ use super::{ }; use std::{ any::Any, - collections::{HashMap, HashSet}, - sync::Arc, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; -use zenoh_protocol::network::Oam; +use zenoh_protocol::network::{ + declare::{QueryableId, SubscriberId}, + Oam, +}; use zenoh_protocol::{ common::ZExtBody, network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, @@ -177,7 +180,7 
@@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -205,7 +208,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -363,19 +366,21 @@ impl HatContext { } struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index a7d58ce1a5..4f6ce5aeca 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -22,8 +22,10 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ @@ -43,10 +45,11 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == 
WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -54,7 +57,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -83,13 +86,13 @@ fn propagate_simple_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -118,16 +121,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; @@ -144,7 +148,7 @@ fn declare_client_subscription( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // @TODO use proper SubscriberId wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -170,21 +174,19 @@ fn client_subs(res: &Arc) -> Vec> { fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { 
for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -194,36 +196,33 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = 
client_subs(res); + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + if client_subs.len() == 1 { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } @@ -231,9 +230,14 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -247,7 +251,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } } @@ -258,27 +262,29 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - declare_client_subscription(tables, face, res, sub_info); + declare_client_subscription(tables, face, id, res, sub_info); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_subscription(tables, face, res); + ) -> Option> { + forget_client_subscription(tables, face, id) } 
fn get_subscriptions(&self, tables: &Tables) -> Vec> { let mut subs = HashSet::new(); for src_face in tables.faces.values() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { subs.insert(sub.clone()); } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 68f2669f6f..04b31b41ef 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -23,10 +23,12 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ @@ -83,16 +85,19 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && (src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -100,7 +105,7 @@ fn 
propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -114,13 +119,13 @@ fn propagate_simple_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -135,16 +140,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); + register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); } @@ -164,22 +170,19 @@ fn client_qabls(res: &Arc) -> Vec> { fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -189,38 +192,37 @@ pub(super) fn 
undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(face).local_qabls.remove(res); + let mut client_qabls = client_qabls(res); + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + if client_qabls.len() == 1 { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -228,9 +230,14 @@ pub(super) fn undeclare_client_queryable( fn 
forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -240,7 +247,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.values() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -255,27 +262,29 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, ) { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_queryable(tables, face, res); + ) -> Option> { + forget_client_queryable(tables, face, id) } fn get_queryables(&self, tables: &Tables) -> Vec> { let mut qabls = HashSet::new(); for src_face in tables.faces.values() { - for qabl in &face_hat!(src_face).remote_qabls { + for qabl in face_hat!(src_face).remote_qabls.values() { qabls.insert(qabl.clone()); } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 24c837e8f5..ff576ae271 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -52,12 +52,16 @@ use std::{ any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, - sync::Arc, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use 
zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, + network::{ + declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + oam::id::OAM_LINKSTATE, + Oam, + }, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -232,14 +236,12 @@ impl HatTables { .as_ref() .map(|net| { let links = net.get_links(peer1); - log::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); HatTables::failover_brokering_to(links, peer2) }) .unwrap_or(false) } fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - log::trace!("Schedule computations"); if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) { @@ -264,7 +266,6 @@ impl HatTables { pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); queries::queries_tree_change(&mut tables, &new_childs, net_type); - log::trace!("Computations completed"); match net_type { WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, _ => hat_mut!(tables).peers_trees_task = None, @@ -418,7 +419,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -446,7 +447,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_, mut res) in face .hat .downcast_mut::() .unwrap() @@ -773,20 +774,22 @@ impl HatContext { struct HatFace { link_id: usize, - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - local_subs: HashSet::new(), - remote_subs: 
HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 6030269cfa..da1ca66efd 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -25,8 +25,10 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ @@ -53,8 +55,6 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -63,7 +63,7 @@ fn send_sourced_subscription_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // Sourced subscriptions do not use ids wire_expr: key_expr, ext_info: *sub_info, }), @@ -89,7 +89,7 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && if full_peer_net { dst_face.whatami == WhatAmI::Client } else { @@ -99,7 +99,8 @@ fn propagate_simple_subscription_to( || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - 
face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -107,7 +108,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -189,11 +190,6 @@ fn register_router_subscription( if !res_hat!(res).router_subs.contains(&router) { // Register router subscription { - log::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); res_hat_mut!(res).router_subs.insert(router); hat_mut!(tables).router_subs.insert(res.clone()); } @@ -230,7 +226,6 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -257,13 +252,13 @@ fn declare_peer_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -292,16 +287,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + 
register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = tables.zid; @@ -356,8 +352,6 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -366,7 +360,7 @@ fn send_forget_sourced_subscription_to_net_childs( node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // Sourced subscriptions do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -382,21 +376,19 @@ fn send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -413,7 +405,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< .collect::>>() { if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains(res) + && face_hat!(face).local_subs.contains_key(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.subs.is_some() @@ -422,21 +414,20 @@ fn 
propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_subs.remove(res); + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -479,11 +470,6 @@ fn propagate_forget_sourced_subscription( } fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); res_hat_mut!(res).router_subs.retain(|sub| sub != router); if res_hat!(res).router_subs.is_empty() { @@ -522,11 +508,6 @@ fn forget_router_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -568,40 +549,37 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - 
face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + let router_subs = remote_router_subs(tables, res); + let peer_subs = remote_peer_subs(tables, res); + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); + } + if client_subs.len() == 1 && !router_subs && !peer_subs { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: 
ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } @@ -609,9 +587,14 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -622,7 +605,8 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { if face.whatami == WhatAmI::Client { for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -630,7 +614,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -649,7 +633,8 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { && hat!(tables).failover_brokering(s.face.zid, face.zid))) })) { - face_hat_mut!(face).local_subs.insert(sub.clone()); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -657,7 +642,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: 
ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -760,7 +745,7 @@ pub(super) fn pubsub_tree_change( pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in &face_hat!(src_face).remote_subs { + for res in face_hat!(src_face).remote_subs.values() { let client_subs = res .session_ctxs .values() @@ -772,7 +757,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_subs.contains(res) { + if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -790,7 +775,6 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }) }; if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -798,8 +782,8 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber( UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }, ), }, @@ -810,7 +794,8 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = 
face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers @@ -822,7 +807,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -876,6 +861,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -892,10 +878,10 @@ impl HatPubSubTrait for HatCode { declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, face, res, sub_info) + declare_client_subscription(tables, face, id, res, sub_info) } } - _ => declare_client_subscription(tables, face, res, sub_info), + _ => declare_client_subscription(tables, face, id, res, sub_info), } } @@ -903,25 +889,40 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_subscription(tables, face, res, &router) + if let Some(mut res) = res { + if let Some(router) = get_router(tables, face, node_id) { + forget_router_subscription(tables, face, &mut res, &router); + Some(res) + } else { + None + } + } else { + None } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer) + if let Some(mut res) = res { + if let Some(peer) = 
get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_subscription(tables, face, res) + forget_client_subscription(tables, face, id) } } - _ => forget_client_subscription(tables, face, res), + _ => forget_client_subscription(tables, face, id), } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 008e71d7af..b76f0adcc6 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -26,10 +26,12 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ @@ -204,8 +206,6 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -214,7 +214,7 @@ fn send_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids wire_expr: key_expr, ext_info: *qabl_info, }), @@ -238,9 +238,9 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = 
face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -252,9 +252,12 @@ fn propagate_simple_queryable( .failover_brokering(src_face.as_ref().unwrap().zid, dst_face.zid)) } { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -262,7 +265,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -322,11 +325,6 @@ fn register_router_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { - log::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); res_hat_mut!(res).router_qabls.insert(router, *qabl_info); hat_mut!(tables).router_qabls.insert(res.clone()); } @@ -375,7 +373,6 @@ fn register_peer_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -402,13 +399,13 @@ fn declare_peer_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), 
face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -423,16 +420,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); + register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, Some(face), res, &local_details, zid); @@ -486,8 +484,6 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -496,7 +492,7 @@ fn send_forget_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -512,22 +508,19 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use 
proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -553,21 +546,20 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_qabls.remove(res); + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -610,11 +602,6 @@ fn propagate_forget_sourced_queryable( } fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); res_hat_mut!(res).router_qabls.remove(router); if res_hat!(res).router_qabls.is_empty() { @@ -653,7 +640,6 @@ fn forget_router_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -699,44 +685,43 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", 
res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - let router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut client_qabls = client_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let peer_qabls = remote_peer_qabls(tables, res); - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); - } - - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); + } - face_hat_mut!(face).local_qabls.remove(res); + if client_qabls.len() == 1 && 
!router_qabls && !peer_qabls { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -744,9 +729,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -754,7 +744,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -762,7 +755,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -783,7 +776,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { })) { let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let id = face_hat!(face).next_id.fetch_add(1, 
Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -791,7 +787,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -853,7 +849,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in &face_hat!(src_face).remote_qabls { + for res in face_hat!(src_face).remote_qabls.values() { let client_qabls = res .session_ctxs .values() @@ -865,7 +861,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_qabls.contains_key(res) { + if let Some((id, _)) = face_hat!(dst_face).local_qabls.get(res).cloned() { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -883,7 +879,6 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }) }; if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -891,8 +886,8 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable( UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr:
WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }, ), }, @@ -904,9 +899,10 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; let info = local_qabl_info(tables, res, dst_face); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -914,7 +910,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -1024,6 +1020,7 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, @@ -1040,10 +1037,10 @@ impl HatQueriesTrait for HatCode { declare_peer_queryable(tables, face, res, qabl_info, peer) } } else { - declare_client_queryable(tables, face, res, qabl_info) + declare_client_queryable(tables, face, id, res, qabl_info) } } - _ => declare_client_queryable(tables, face, res, qabl_info), + _ => declare_client_queryable(tables, face, id, res, qabl_info), } } @@ -1051,25 +1048,40 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_queryable(tables, face, res, &router) + if let Some(mut res) = res { + if let Some(router) = get_router(tables, face, node_id) { + forget_router_queryable(tables, face, 
&mut res, &router); + Some(res) + } else { + None + } + } else { + None } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer) + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_queryable(tables, face, res) + forget_client_queryable(tables, face, id) } } - _ => forget_client_queryable(tables, face, res), + _ => forget_client_queryable(tables, face, id), } } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 03b447aae0..e76475f447 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -32,6 +32,7 @@ use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -59,6 +60,7 @@ type Handler = Arc; pub struct AdminSpace { zid: ZenohId, + queryable_id: QueryableId, primitives: Mutex>>, mappings: Mutex>, handlers: HashMap, @@ -189,6 +191,7 @@ impl AdminSpace { }); let admin = Arc::new(AdminSpace { zid: runtime.zid(), + queryable_id: runtime.next_id(), primitives: Mutex::new(None), mappings: Mutex::new(HashMap::new()), handlers, @@ -278,7 +281,7 @@ impl AdminSpace { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: runtime.next_id(), wire_expr: [&root_key, "/**"].concat().into(), ext_info: QueryableInfo { complete: 0, @@ -292,7 +295,7 @@ impl AdminSpace { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - 
id: 0, // @TODO use proper SubscriberId (#703) + id: runtime.next_id(), wire_expr: [&root_key, "/config/**"].concat().into(), ext_info: SubscriberInfo::DEFAULT, }), @@ -431,6 +434,7 @@ impl Primitives for AdminSpace { #[cfg(feature = "unstable")] attachment: query.ext_attachment.map(Into::into), }), + eid: self.queryable_id, }; for (key, handler) in &self.handlers { diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 7061b38622..8b116b1080 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -30,6 +30,7 @@ use async_std::task::JoinHandle; use futures::stream::StreamExt; use futures::Future; use std::any::Any; +use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; use stop_token::future::FutureExt; use stop_token::{StopSource, TimedOutError}; @@ -48,6 +49,7 @@ use zenoh_transport::{ struct RuntimeState { zid: ZenohId, whatami: WhatAmI, + next_id: AtomicU32, metadata: serde_json::Value, router: Arc, config: Notifier, @@ -114,6 +116,7 @@ impl Runtime { state: Arc::new(RuntimeState { zid, whatami, + next_id: AtomicU32::new(1), // 0 is reserved for routing core metadata, router, config: config.clone(), @@ -154,6 +157,11 @@ impl Runtime { zwrite!(self.state.transport_handlers).push(handler); } + #[inline] + pub fn next_id(&self) -> u32 { + self.state.next_id.fetch_add(1, Ordering::SeqCst) + } + pub async fn close(&self) -> ZResult<()> { log::trace!("Runtime::close())"); drop(self.state.stop_source.write().unwrap().take()); diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 80a9dd458a..4560eefaae 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -66,6 +66,7 @@ fn base_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face.upgrade().unwrap(), + 0, &WireExpr::from(1).with_suffix("four/five"), &sub_info, NodeId::default(), @@ -166,6 +167,76 @@ fn match_test() { } } +#[test] +fn multisub_test() { + let config = Config::default(); + let router 
= Router::new( + ZenohId::try_from([1]).unwrap(), + WhatAmI::Client, + Some(Arc::new(HLC::default())), + &config, + ) + .unwrap(); + let tables = router.tables.clone(); + + let primitives = Arc::new(DummyPrimitives {}); + let face0 = Arc::downgrade(&router.new_primitives(primitives).state); + assert!(face0.upgrade().is_some()); + + // -------------- + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, + mode: Mode::Push, + }; + declare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 0, + &"sub".into(), + &sub_info, + NodeId::default(), + ); + let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") + .map(|res| Arc::downgrade(&res)); + assert!(optres.is_some()); + let res = optres.unwrap(); + assert!(res.upgrade().is_some()); + + declare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 1, + &"sub".into(), + &sub_info, + NodeId::default(), + ); + assert!(res.upgrade().is_some()); + + undeclare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 0, + &WireExpr::empty(), + NodeId::default(), + ); + assert!(res.upgrade().is_some()); + + undeclare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 1, + &WireExpr::empty(), + NodeId::default(), + ); + assert!(res.upgrade().is_none()); + + tables::close_face(&tables, &face0); +} + #[test] fn clean_test() { let config = Config::default(); @@ -241,6 +312,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 0, &"todrop1/todrop11".into(), &sub_info, NodeId::default(), @@ -255,6 +327,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 1, &WireExpr::from(1).with_suffix("/todrop12"), &sub_info, NodeId::default(), @@ -270,7 +343,8 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut 
face0.upgrade().unwrap(), - &WireExpr::from(1).with_suffix("/todrop12"), + 1, + &WireExpr::empty(), NodeId::default(), ); @@ -284,7 +358,8 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &"todrop1/todrop11".into(), + 0, + &WireExpr::empty(), NodeId::default(), ); assert!(res1.upgrade().is_some()); @@ -302,6 +377,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 2, &"todrop3".into(), &sub_info, NodeId::default(), @@ -316,7 +392,8 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &"todrop3".into(), + 2, + &WireExpr::empty(), NodeId::default(), ); assert!(res1.upgrade().is_some()); @@ -331,6 +408,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 3, &"todrop5".into(), &sub_info, NodeId::default(), @@ -339,6 +417,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 4, &"todrop6".into(), &sub_info, NodeId::default(), @@ -518,6 +597,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 0, &WireExpr::from(11).with_suffix("/**"), &sub_info, NodeId::default(), @@ -565,6 +645,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face1.upgrade().unwrap(), + 0, &WireExpr::from(21).with_suffix("/**"), &sub_info, NodeId::default(), @@ -612,6 +693,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face2.upgrade().unwrap(), + 0, &WireExpr::from(31).with_suffix("/**"), &sub_info, NodeId::default(), diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 59a4bbd96e..177906e9b1 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -31,7 +31,10 @@ pub(crate) mod common { writer::HasWriter, }; pub use zenoh_core::Resolve; + pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; + #[zenoh_macros::unstable] + pub use 
zenoh_protocol::core::{EntityGlobalId, EntityId}; pub use crate::config::{self, Config, ValidatedMap}; pub use crate::handlers::IntoCallbackReceiverPair; @@ -49,6 +52,8 @@ pub(crate) mod common { pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; + #[zenoh_macros::unstable] + pub use crate::sample::SourceInfo; pub use crate::sample::{Sample, SampleKind}; pub use crate::publication::Priority; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 9fb4bdf6c3..2a1a58ebd9 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,14 +13,11 @@ // //! Publishing primitives. -use crate::encoding::Encoding; -use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::payload::Payload; +use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleKind}; -use crate::Locality; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] @@ -30,10 +27,11 @@ use crate::{ }; use std::future::Ready; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh_keyexpr::keyexpr; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; +#[zenoh_macros::unstable] +use zenoh_protocol::zenoh::ext::SourceInfoType; use zenoh_protocol::zenoh::Del; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::zenoh::Put; @@ -148,6 +146,8 @@ impl SyncResolve for PutBuilder<'_, '_> { let publisher = Publisher { session, + #[cfg(feature = "unstable")] + eid: 0, // This is a one shot Publisher key_expr: key_expr?, congestion_control, priority, @@ -160,6 +160,8 @@ impl SyncResolve for PutBuilder<'_, '_> { self.kind, self.encoding, #[cfg(feature = "unstable")] + None, + #[cfg(feature = "unstable")] self.attachment, ) } @@ -241,6 +243,8 @@ impl std::fmt::Debug for PublisherRef<'_> { #[derive(Debug, Clone)] pub struct Publisher<'a> { 
pub(crate) session: SessionRef<'a>, + #[cfg(feature = "unstable")] + pub(crate) eid: EntityId, pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, @@ -248,6 +252,29 @@ pub struct Publisher<'a> { } impl<'a> Publisher<'a> { + /// Returns the [`EntityGlobalId`] of this Publisher. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression") + /// .res() + /// .await + /// .unwrap(); + /// let publisher_id = publisher.id(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalId { + zid: self.session.zid(), + eid: self.eid, + } + } + pub fn key_expr(&self) -> &KeyExpr<'a> { &self.key_expr } @@ -317,6 +344,8 @@ impl<'a> Publisher<'a> { kind, encoding: Encoding::ZENOH_BYTES, #[cfg(feature = "unstable")] + source_info: None, + #[cfg(feature = "unstable")] attachment: None, } } @@ -604,6 +633,8 @@ pub struct Publication<'a> { kind: SampleKind, encoding: Encoding, #[cfg(feature = "unstable")] + pub(crate) source_info: Option, + #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -618,6 +649,27 @@ impl<'a> Publication<'a> { self.attachment = Some(attachment); self } + + /// Send data with the given [`SourceInfo`]. 
+ /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// publisher.put("Value").with_source_info(SourceInfo { + /// source_id: Some(publisher.id()), + /// source_sn: Some(0), + /// }).res().await.unwrap(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.source_info = Some(source_info); + self + } } impl Resolvable for Publication<'_> { @@ -632,6 +684,8 @@ impl SyncResolve for Publication<'_> { self.kind, self.encoding, #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] self.attachment, ) } @@ -661,6 +715,8 @@ impl<'a> Sink for Publisher<'a> { kind: item.kind, encoding: item.encoding, #[cfg(feature = "unstable")] + source_info: None, + #[cfg(feature = "unstable")] attachment: item.attachment, } .res_sync() @@ -784,8 +840,12 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { self.session .declare_publication_intent(key_expr.clone()) .res_sync()?; + #[cfg(feature = "unstable")] + let eid = self.session.runtime.next_id(); let publisher = Publisher { session: self.session, + #[cfg(feature = "unstable")] + eid, key_expr, congestion_control: self.congestion_control, priority: self.priority, @@ -809,6 +869,7 @@ fn resolve_put( payload: Payload, kind: SampleKind, encoding: Encoding, + #[cfg(feature = "unstable")] source_info: Option, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -842,6 +903,12 @@ fn resolve_put( PushBody::Put(Put { timestamp, encoding: encoding.clone().into(), + #[cfg(feature = "unstable")] + ext_sinfo: source_info.map(|s| SourceInfoType { + id: s.source_id.unwrap_or_default(), + sn: s.source_sn.unwrap_or_default() as u32, + }), + 
#[cfg(not(feature = "unstable"))] ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -861,6 +928,12 @@ fn resolve_put( } PushBody::Del(Del { timestamp, + #[cfg(feature = "unstable")] + ext_sinfo: source_info.map(|s| SourceInfoType { + id: s.source_id.unwrap_or_default(), + sn: s.source_sn.unwrap_or_default() as u32, + }), + #[cfg(not(feature = "unstable"))] ext_sinfo: None, ext_attachment, ext_unknown: vec![], diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6bd78d4fc7..bd5ec81101 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,7 +17,6 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::DataInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -28,11 +27,9 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::{ - core::WireExpr, - network::{response, Mapping, RequestId, Response, ResponseFinal}, - zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}, -}; +use zenoh_protocol::core::{EntityId, WireExpr}; +use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; +use zenoh_protocol::zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -64,6 +61,7 @@ impl Drop for QueryInner { #[derive(Clone)] pub struct Query { pub(crate) inner: Arc, + pub(crate) eid: EntityId, } impl Query { @@ -192,22 +190,12 @@ impl SyncResolve for ReplyBuilder<'_> { kind, encoding, timestamp, - qos, #[cfg(feature = "unstable")] source_info, #[cfg(feature = "unstable")] attachment, + .. 
} = sample; - #[allow(unused_mut)] - let mut data_info = DataInfo { - kind, - encoding: Some(encoding), - timestamp, - qos, - source_id: None, - source_sn: None, - }; - // Use a macro for inferring the proper const extension ID between Put and Del cases macro_rules! ext_attachment { () => {{ @@ -222,21 +210,17 @@ impl SyncResolve for ReplyBuilder<'_> { ext_attachment }}; } - + #[allow(unused_mut)] + let mut ext_sinfo = None; #[cfg(feature = "unstable")] { - data_info.source_id = source_info.source_id; - data_info.source_sn = source_info.source_sn; + if source_info.source_id.is_some() || source_info.source_sn.is_some() { + ext_sinfo = Some(zenoh::put::ext::SourceInfoType { + id: source_info.source_id.unwrap_or_default(), + sn: source_info.source_sn.unwrap_or_default() as u32, + }) + } } - let ext_sinfo = if data_info.source_id.is_some() || data_info.source_sn.is_some() { - Some(zenoh::put::ext::SourceInfoType { - zid: data_info.source_id.unwrap_or_default(), - eid: 0, // @TODO use proper EntityId (#703) - sn: data_info.source_sn.unwrap_or_default() as u32, - }) - } else { - None - }; self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -249,8 +233,8 @@ impl SyncResolve for ReplyBuilder<'_> { ext_unknown: vec![], payload: match kind { SampleKind::Put => ReplyBody::Put(Put { - timestamp: data_info.timestamp, - encoding: data_info.encoding.unwrap_or_default().into(), + timestamp, + encoding: encoding.into(), ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -270,7 +254,7 @@ impl SyncResolve for ReplyBuilder<'_> { ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, - eid: 0, // @TODO use proper EntityId (#703) + eid: self.query.eid, }), }); Ok(()) @@ -300,7 +284,7 @@ impl SyncResolve for ReplyBuilder<'_> { ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, - eid: 0, // @TODO use proper EntityId (#703) + eid: 
self.query.eid, }), }); Ok(()) @@ -607,6 +591,29 @@ pub struct Queryable<'a, Receiver> { } impl<'a, Receiver> Queryable<'a, Receiver> { + /// Returns the [`EntityGlobalId`] of this Queryable. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let queryable = session.declare_queryable("key/expression") + /// .res() + /// .await + /// .unwrap(); + /// let queryable_id = queryable.id(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalId { + zid: self.queryable.session.zid(), + eid: self.queryable.state.id, + } + } + #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 543dd62e84..af4a58956d 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -15,16 +15,16 @@ //! Sample primitives use crate::encoding::Encoding; use crate::payload::Payload; -use crate::prelude::{KeyExpr, ZenohId}; +use crate::prelude::{KeyExpr, Value}; use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; -use crate::Value; #[zenoh_macros::unstable] use serde::Serialize; use std::{ convert::{TryFrom, TryInto}, fmt, }; +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; pub type SourceSn = u64; @@ -52,7 +52,7 @@ pub(crate) struct DataInfo { pub kind: SampleKind, pub encoding: Option, pub timestamp: Option, - pub source_id: Option, + pub source_id: Option, pub source_sn: Option, pub qos: QoS, } @@ -61,16 +61,24 @@ pub(crate) struct DataInfo { #[zenoh_macros::unstable] #[derive(Debug, Clone)] pub struct SourceInfo { - /// The [`ZenohId`] of the zenoh instance that published the concerned [`Sample`]. - pub source_id: Option, + /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. 
+ pub source_id: Option, /// The sequence number of the [`Sample`] from the source. pub source_sn: Option, } #[test] #[cfg(feature = "unstable")] +#[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] fn source_info_stack_size() { - assert_eq!(std::mem::size_of::(), 16 * 2); + assert_eq!(std::mem::size_of::(), 40); +} + +#[test] +#[cfg(feature = "unstable")] +#[cfg(all(target_os = "macos", target_arch = "aarch64"))] +fn source_info_stack_size() { + assert_eq!(std::mem::size_of::(), 48); } #[zenoh_macros::unstable] diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 87c416c209..861acf71de 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -48,7 +48,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::fmt; use std::ops::Deref; -use std::sync::atomic::{AtomicU16, AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -57,6 +57,8 @@ use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; +#[cfg(feature = "unstable")] +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::reply::ReplyBody; @@ -97,9 +99,10 @@ pub(crate) struct SessionState { pub(crate) primitives: Option>, // @TODO replace with MaybeUninit ?? 
pub(crate) expr_id_counter: AtomicExprId, // @TODO: manage rollover and uniqueness pub(crate) qid_counter: AtomicRequestId, - pub(crate) decl_id_counter: AtomicUsize, pub(crate) local_resources: HashMap, pub(crate) remote_resources: HashMap, + #[cfg(feature = "unstable")] + pub(crate) remote_subscribers: HashMap>, //pub(crate) publications: Vec, pub(crate) subscribers: HashMap>, pub(crate) queryables: HashMap>, @@ -121,9 +124,10 @@ impl SessionState { primitives: None, expr_id_counter: AtomicExprId::new(1), // Note: start at 1 because 0 is reserved for NO_RESOURCE qid_counter: AtomicRequestId::new(0), - decl_id_counter: AtomicUsize::new(0), local_resources: HashMap::new(), remote_resources: HashMap::new(), + #[cfg(feature = "unstable")] + remote_subscribers: HashMap::new(), //publications: Vec::new(), subscribers: HashMap::new(), queryables: HashMap::new(), @@ -967,19 +971,20 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("subscribe({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); let key_expr = match scope { Some(scope) => scope / key_expr, None => key_expr.clone(), }; - let sub_state = Arc::new(SubscriberState { + let mut sub_state = SubscriberState { id, + remote_id: id, key_expr: key_expr.clone().into_owned(), scope: scope.clone().map(|e| e.into_owned()), origin, callback, - }); + }; #[cfg(not(feature = "unstable"))] let declared_sub = origin != Locality::SessionLocal; @@ -989,29 +994,39 @@ impl Session { .as_str() .starts_with(crate::liveliness::PREFIX_LIVELINESS); - let declared_sub = declared_sub - .then(|| { - match state - .aggregated_subscribers // TODO: can this be an OwnedKeyExpr? 
- .iter() - .find(|s| s.includes( &key_expr)) - { - Some(join_sub) => { - let joined_sub = state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }); - (!joined_sub).then(|| join_sub.clone().into()) - } - None => { - let twin_sub = state - .subscribers - .values() - .any(|s| s.origin != Locality::SessionLocal && s.key_expr == key_expr); - (!twin_sub).then(|| key_expr.clone()) + let declared_sub = + declared_sub + .then(|| { + match state + .aggregated_subscribers + .iter() + .find(|s| s.includes(&key_expr)) + { + Some(join_sub) => { + if let Some(joined_sub) = state.subscribers.values().find(|s| { + s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) + }) { + sub_state.remote_id = joined_sub.remote_id; + None + } else { + Some(join_sub.clone().into()) + } + } + None => { + if let Some(twin_sub) = state.subscribers.values().find(|s| { + s.origin != Locality::SessionLocal && s.key_expr == key_expr + }) { + sub_state.remote_id = twin_sub.remote_id; + None + } else { + Some(key_expr.clone()) + } + } } - } - }) - .flatten(); + }) + .flatten(); + + let sub_state = Arc::new(sub_state); state.subscribers.insert(sub_state.id, sub_state.clone()); for res in state @@ -1064,7 +1079,7 @@ impl Session { ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: id as u32, + id, wire_expr: key_expr.to_wire(self).to_owned(), ext_info: *info, }), @@ -1080,7 +1095,7 @@ impl Session { Ok(sub_state) } - pub(crate) fn unsubscribe(&self, sid: usize) -> ZResult<()> { + pub(crate) fn unsubscribe(&self, sid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(sub_state) = state.subscribers.remove(&sid) { trace!("unsubscribe({:?})", sub_state); @@ -1110,65 +1125,28 @@ impl Session { if send_forget { // Note: there might be several Subscribers on the same KeyExpr. 
// Before calling forget_subscriber(key_expr), check if this was the last one. - let key_expr = &sub_state.key_expr; - match state - .aggregated_subscribers - .iter() - .find(|s| s.includes(key_expr)) - { - Some(join_sub) => { - let joined_sub = state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }); - if !joined_sub { - let primitives = state.primitives.as_ref().unwrap().clone(); - let wire_expr = WireExpr::from(join_sub).to_owned(); - drop(state); - primitives.send_declare(Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) - } - } - } - None => { - let twin_sub = state - .subscribers - .values() - .any(|s| s.origin != Locality::SessionLocal && s.key_expr == *key_expr); - if !twin_sub { - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_declare(Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { - wire_expr: key_expr.to_wire(self).to_owned(), - }, - }), - }); - - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) - } - } + if !state.subscribers.values().any(|s| { + s.origin != Locality::SessionLocal && s.remote_id == sub_state.remote_id + }) { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: 
declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: sub_state.remote_id, + ext_wire_expr: WireExprType { + wire_expr: WireExpr::empty(), + }, + }), + }); + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) } - }; + } } Ok(()) } else { @@ -1185,7 +1163,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("queryable({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); let qable_state = Arc::new(QueryableState { id, key_expr: key_expr.to_owned(), @@ -1193,158 +1171,48 @@ impl Session { origin, callback, }); - #[cfg(feature = "complete_n")] - { - state.queryables.insert(id, qable_state.clone()); - if origin != Locality::SessionLocal && complete { - let primitives = state.primitives.as_ref().unwrap().clone(); - let complete = Session::complete_twin_qabls(&state, key_expr); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: id as u32, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); - } - } - #[cfg(not(feature = "complete_n"))] - { - let twin_qabl = Session::twin_qabl(&state, key_expr); - let complete_twin_qabl = twin_qabl && Session::complete_twin_qabl(&state, key_expr); - - state.queryables.insert(id, qable_state.clone()); + state.queryables.insert(id, qable_state.clone()); - if origin != Locality::SessionLocal && (!twin_qabl || (!complete_twin_qabl && complete)) - { - let primitives = state.primitives.as_ref().unwrap().clone(); - let complete = u8::from(!complete_twin_qabl && complete); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - 
primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: id as u32, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); - } + if origin != Locality::SessionLocal { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + let qabl_info = QueryableInfo { + complete: if complete { 1 } else { 0 }, + distance: 0, + }; + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr.to_owned(), + ext_info: qabl_info, + }), + }); } Ok(qable_state) } - pub(crate) fn twin_qabl(state: &SessionState, key: &WireExpr) -> bool { - state.queryables.values().any(|q| { - q.origin != Locality::SessionLocal - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - } - - #[cfg(not(feature = "complete_n"))] - pub(crate) fn complete_twin_qabl(state: &SessionState, key: &WireExpr) -> bool { - state.queryables.values().any(|q| { - q.origin != Locality::SessionLocal - && q.complete - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - } - - #[cfg(feature = "complete_n")] - pub(crate) fn complete_twin_qabls(state: &SessionState, key: &WireExpr) -> u8 { - state - .queryables - .values() - .filter(|q| { - q.origin != Locality::SessionLocal - && q.complete - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - .count() as u8 - } - - pub(crate) fn close_queryable(&self, qid: usize) -> ZResult<()> { + pub(crate) fn close_queryable(&self, qid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(qable_state) = state.queryables.remove(&qid) { 
trace!("close_queryable({:?})", qable_state); if qable_state.origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); - if Session::twin_qabl(&state, &qable_state.key_expr) { - // There still exist Queryables on the same KeyExpr. - if qable_state.complete { - #[cfg(feature = "complete_n")] - { - let complete = - Session::complete_twin_qabls(&state, &qable_state.key_expr); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: qable_state.key_expr.clone(), - ext_info: qabl_info, - }), - }); - } - #[cfg(not(feature = "complete_n"))] - { - if !Session::complete_twin_qabl(&state, &qable_state.key_expr) { - drop(state); - let qabl_info = QueryableInfo { - complete: 0, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: qable_state.key_expr.clone(), - ext_info: qabl_info, - }), - }); - } - } - } - } else { - // There are no more Queryables on the same KeyExpr. 
- drop(state); - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { - wire_expr: qable_state.key_expr.clone(), - }, - }), - }); - } + drop(state); + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: qable_state.id, + ext_wire_expr: WireExprType { + wire_expr: qable_state.key_expr.clone(), + }, + }), + }); } Ok(()) } else { @@ -1359,7 +1227,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("declare_liveliness({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { id, @@ -1374,7 +1242,7 @@ impl Session { ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: id as u32, + id, wire_expr: key_expr.to_wire(self).to_owned(), ext_info: SubscriberInfo::DEFAULT, }), @@ -1383,7 +1251,7 @@ impl Session { } #[zenoh_macros::unstable] - pub(crate) fn undeclare_liveliness(&self, tid: usize) -> ZResult<()> { + pub(crate) fn undeclare_liveliness(&self, tid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(tok_state) = state.tokens.remove(&tid) { trace!("undeclare_liveliness({:?})", tok_state); @@ -1398,10 +1266,8 @@ impl Session { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { - wire_expr: key_expr.to_wire(self).to_owned(), - 
}, + id: tok_state.id, + ext_wire_expr: WireExprType::null(), }), }); } @@ -1418,8 +1284,7 @@ impl Session { callback: Callback<'static, MatchingStatus>, ) -> ZResult> { let mut state = zwrite!(self.state); - - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); log::trace!("matches_listener({:?}) => {id}", publisher.key_expr); let listener_state = Arc::new(MatchingListenerState { id, @@ -1554,7 +1419,7 @@ impl Session { } #[zenoh_macros::unstable] - pub(crate) fn undeclare_matches_listener_inner(&self, sid: usize) -> ZResult<()> { + pub(crate) fn undeclare_matches_listener_inner(&self, sid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(state) = state.matching_listeners.remove(&sid) { trace!("undeclare_matches_listener_inner({:?})", state); @@ -1856,15 +1721,15 @@ impl Session { body: Option, #[cfg(feature = "unstable")] attachment: Option, ) { - let (primitives, key_expr, callbacks) = { + let (primitives, key_expr, queryables) = { let state = zread!(self.state); match state.wireexpr_to_keyexpr(key_expr, local) { Ok(key_expr) => { - let callbacks = state + let queryables = state .queryables - .values() + .iter() .filter( - |queryable| + |(_, queryable)| (queryable.origin == Locality::Any || (local == (queryable.origin == Locality::SessionLocal))) && @@ -1881,12 +1746,12 @@ impl Session { } } ) - .map(|qable| qable.callback.clone()) - .collect::>>(); + .map(|(id, qable)| (*id, qable.callback.clone())) + .collect::)>>(); ( state.primitives.as_ref().unwrap().clone(), key_expr.into_owned(), - callbacks, + queryables, ) } Err(err) => { @@ -1898,29 +1763,30 @@ impl Session { let parameters = parameters.to_owned(); - let zid = self.runtime.zid(); // @TODO build/use prebuilt specific zid + let zid = self.runtime.zid(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr, - parameters, - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), - }), - qid, - zid, 
- primitives: if local { - Arc::new(self.clone()) - } else { - primitives - }, - #[cfg(feature = "unstable")] - attachment, + let query_inner = Arc::new(QueryInner { + key_expr, + parameters, + value: body.map(|b| Value { + payload: b.payload.into(), + encoding: b.encoding.into(), }), - }; - for callback in callbacks.iter() { - callback(query.clone()); + qid, + zid, + primitives: if local { + Arc::new(self.clone()) + } else { + primitives + }, + #[cfg(feature = "unstable")] + attachment, + }); + for (eid, callback) in queryables { + callback(Query { + inner: query_inner.clone(), + eid, + }); } } } @@ -2111,9 +1977,13 @@ impl Primitives for Session { trace!("recv DeclareSubscriber {} {:?}", m.id, m.wire_expr); #[cfg(feature = "unstable")] { - let state = zread!(self.state); - match state.wireexpr_to_keyexpr(&m.wire_expr, false) { + let mut state = zwrite!(self.state); + match state + .wireexpr_to_keyexpr(&m.wire_expr, false) + .map(|e| e.into_owned()) + { Ok(expr) => { + state.remote_subscribers.insert(m.id, expr.clone()); self.update_status_up(&state, &expr); if expr @@ -2141,33 +2011,30 @@ impl Primitives for Session { trace!("recv UndeclareSubscriber {:?}", m.id); #[cfg(feature = "unstable")] { - let state = zread!(self.state); - match state.wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) { - Ok(expr) => { - self.update_status_down(&state, &expr); + let mut state = zwrite!(self.state); + if let Some(expr) = state.remote_subscribers.remove(&m.id) { + self.update_status_down(&state, &expr); - if expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) - { - drop(state); - let data_info = DataInfo { - kind: SampleKind::Delete, - ..Default::default() - }; - self.handle_data( - false, - &m.ext_wire_expr.wire_expr, - Some(data_info), - ZBuf::default(), - #[cfg(feature = "unstable")] - None, - ); - } - } - Err(err) => { - log::error!("Received Forget Subscriber for unkown key_expr: {}", err) + if expr + .as_str() + 
.starts_with(crate::liveliness::PREFIX_LIVELINESS) + { + drop(state); + let data_info = DataInfo { + kind: SampleKind::Delete, + ..Default::default() + }; + self.handle_data( + false, + &m.ext_wire_expr.wire_expr, + Some(data_info), + ZBuf::default(), + #[cfg(feature = "unstable")] + None, + ); } + } else { + log::error!("Received Undeclare Subscriber for unkown id: {}", m.id); } } } @@ -2194,7 +2061,7 @@ impl Primitives for Session { encoding: Some(m.encoding.into()), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2212,7 +2079,7 @@ impl Primitives for Session { encoding: None, timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2272,7 +2139,7 @@ impl Primitives for Session { }, }; let replier_id = match e.ext_sinfo { - Some(info) => info.zid, + Some(info) => info.id.zid, None => ZenohId::rand(), }; let new_reply = Reply { @@ -2366,7 +2233,7 @@ impl Primitives for Session { encoding: Some(encoding.into()), timestamp, qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, #[cfg(feature = "unstable")] @@ -2384,7 +2251,7 @@ impl Primitives for Session { encoding: None, timestamp, qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, #[cfg(feature = "unstable")] diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index c707218017..e276d0c6d0 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -25,6 +25,8 @@ 
use std::future::Ready; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +#[cfg(feature = "unstable")] +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; /// The kind of reliability. @@ -32,6 +34,7 @@ pub use zenoh_protocol::core::Reliability; pub(crate) struct SubscriberState { pub(crate) id: Id, + pub(crate) remote_id: Id, pub(crate) key_expr: KeyExpr<'static>, pub(crate) scope: Option>, pub(crate) origin: Locality, @@ -741,6 +744,29 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { } impl<'a, Receiver> Subscriber<'a, Receiver> { + /// Returns the [`EntityGlobalId`] of this Subscriber. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let subscriber = session.declare_subscriber("key/expression") + /// .res() + /// .await + /// .unwrap(); + /// let subscriber_id = subscriber.id(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalId { + zid: self.subscriber.session.zid(), + eid: self.subscriber.state.id, + } + } + /// Returns the [`KeyExpr`] this Subscriber subscribes to. pub fn key_expr(&self) -> &KeyExpr<'static> { &self.subscriber.state.key_expr From f12f3382fa38af82cdd8bf75bcbb2bad4eec7f68 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Tue, 12 Mar 2024 19:00:48 +0100 Subject: [PATCH 010/598] refactor Query.reply() (#796) * refactor Query.reply() into seprate methods:reply, reply_del and reply_err * explain #[allow(unused_mut)]; replace unwrap on KeyxExpr.try_from with ? 
as it was originally for Sample in zenoh/tests/routing.rs * mark Query.reply_sample as unstable * format fix --- examples/examples/z_queryable.rs | 19 +- examples/examples/z_storage.rs | 2 +- plugins/zenoh-backend-traits/Cargo.toml | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- .../src/replica/align_queryable.rs | 53 +-- .../src/replica/storage.rs | 6 +- zenoh-ext/src/group.rs | 6 +- zenoh-ext/src/publication_cache.rs | 6 +- zenoh/src/admin.rs | 6 +- zenoh/src/net/runtime/adminspace.rs | 30 +- zenoh/src/queryable.rs | 355 ++++++++++++------ zenoh/src/sample.rs | 6 + zenoh/src/session.rs | 12 +- zenoh/tests/attachments.rs | 4 +- zenoh/tests/routing.rs | 4 +- zenoh/tests/session.rs | 19 +- zenoh/tests/unicity.rs | 16 +- 19 files changed, 340 insertions(+), 216 deletions(-) diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index d7376835b7..12c1fc3f20 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -54,25 +54,28 @@ async fn main() { println!(">> [Queryable ] Received Query '{}' with value '{}'", query.selector(), payload); }, } - let reply = if send_errors.swap(false, Relaxed) { + if send_errors.swap(false, Relaxed) { println!( ">> [Queryable ] Replying (ERROR: '{}')", value, ); - Err(value.clone().into()) + query + .reply_err(value.clone()) + .res() + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); } else { println!( ">> [Queryable ] Responding ('{}': '{}')", key_expr.as_str(), value, ); - Ok(Sample::new(key_expr.clone(), value.clone())) + query + .reply(key_expr.clone(), value.clone()) + .res() + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); }; - query - .reply(reply) - .res() - .await - .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); }, _ = stdin.read_exact(&mut input).fuse() => { diff --git 
a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 5e0eaabd44..857181751b 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -67,7 +67,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - query.reply(Ok(sample.clone())).res().await.unwrap(); + query.reply(sample.key_expr.clone(), sample.payload.clone()).res().await.unwrap(); } } }, diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index f2b8a4a1eb..b3926ab955 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml +++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -31,7 +31,7 @@ async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } derive_more = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true } +zenoh = { workspace = true, features = ["unstable"] } zenoh-result = { workspace = true } zenoh-util = { workspace = true } schemars = { workspace = true } diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 8b9fa359e0..d17e6dfd77 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -325,6 +325,6 @@ impl Query { sample }; // Send reply - self.q.reply(Ok(sample)) + self.q.reply_sample(sample) } } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 592a08ca9b..12cc6ffa84 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -174,7 +174,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - 
query.reply(Ok(sample.clone())).res().await.unwrap(); + query.reply_sample(sample.clone()).res().await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c5bdcc4c73..bb76005d6e 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -49,11 +49,7 @@ async fn main() { let receiver = queryable.receiver.clone(); async move { while let Ok(request) = receiver.recv_async().await { - request - .reply(Ok(Sample::new(key, HTML))) - .res() - .await - .unwrap(); + request.reply(key, HTML).res().await.unwrap(); } } }); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 359b8dd7e8..5fda8b576d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -95,34 +95,43 @@ impl AlignQueryable { for value in values { match value { AlignData::Interval(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .res() + .await + .unwrap(); } AlignData::Subinterval(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .res() + .await + .unwrap(); } AlignData::Content(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .res() + .await + 
.unwrap(); } AlignData::Data(k, (v, ts)) => { - let Value { - payload, encoding, .. - } = v; - let sample = Sample::new(k, payload) - .with_encoding(encoding) - .with_timestamp(ts); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply(k, v.payload) + .with_encoding(v.encoding) + .with_timestamp(ts) + .res() + .await + .unwrap(); } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 1ef7e65390..6b48895612 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -532,7 +532,7 @@ impl StorageService { } else { sample }; - if let Err(e) = q.reply(Ok(sample)).res().await { + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -570,7 +570,7 @@ impl StorageService { } else { sample }; - if let Err(e) = q.reply(Ok(sample)).res().await { + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -583,7 +583,7 @@ impl StorageService { let err_message = format!("Storage '{}' raised an error on query: {}", self.name, e); log::warn!("{}", err_message); - if let Err(e) = q.reply(Err(err_message.into())).res().await { + if let Err(e) = q.reply_err(err_message).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 9078e61741..75a435e8f4 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -237,11 +237,7 @@ async fn query_handler(z: Arc, state: Arc) { while let Ok(query) = queryable.recv_async().await { log::trace!("Serving query for: {}", &qres); - query - .reply(Ok(Sample::new(qres.clone(), buf.clone()))) - .res() - .await - .unwrap(); + query.reply(qres.clone(), buf.clone()).res().await.unwrap(); } } diff --git 
a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index cd5ed964ad..1c9a286800 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -201,7 +201,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cach content + // on query, reply with cache content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.selector().key_expr.as_str().contains('*') { @@ -212,7 +212,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { log::warn!("Error replying to query: {}", e); } } @@ -226,7 +226,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { log::warn!("Error replying to query: {}", e); } } diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 5a242d51b7..268997d687 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -17,7 +17,7 @@ use crate::{ prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Sample, Session, ZResult, + Payload, Session, ZResult, }; use async_std::task; use std::{ @@ -71,7 +71,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { match Payload::try_from(value) { Ok(zbuf) => { - let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + let _ = query.reply(key_expr, zbuf).res_sync(); } Err(e) => log::debug!("Admin query error: {}", e), } @@ -88,7 +88,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(link) { match Payload::try_from(value) { Ok(zbuf) => { - let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + let _ = query.reply(key_expr, zbuf).res_sync(); } Err(e) => 
log::debug!("Admin query error: {}", e), } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index e76475f447..b67692e704 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -17,7 +17,7 @@ use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; -use crate::prelude::sync::{Sample, SyncResolve}; +use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; use crate::value::Value; @@ -577,9 +577,8 @@ fn router_data(context: &AdminContext, query: Query) { } }; if let Err(e) = query - .reply(Ok( - Sample::new(reply_key, payload).with_encoding(Encoding::APPLICATION_JSON) - )) + .reply(reply_key, payload) + .with_encoding(Encoding::APPLICATION_JSON) .res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -609,7 +608,7 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query.reply(Ok(Sample::new(reply_key, metrics))).res() { + if let Err(e) = query.reply(reply_key, metrics).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -622,10 +621,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - tables.hat_code.info(&tables, WhatAmI::Router), - ))) + .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) .res() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -640,10 +636,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - tables.hat_code.info(&tables, WhatAmI::Peer), - ))) + .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) .res() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ 
-660,7 +653,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { + if let Err(e) = query.reply(key, Payload::empty()).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -677,7 +670,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { + if let Err(e) = query.reply(key, Payload::empty()).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -697,7 +690,7 @@ fn plugins_data(context: &AdminContext, query: Query) { let status = serde_json::to_value(status).unwrap(); match Payload::try_from(status) { Ok(zbuf) => { - if let Err(e) = query.reply(Ok(Sample::new(key, zbuf))).res_sync() { + if let Err(e) = query.reply(key, zbuf).res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -718,8 +711,7 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(Ok(Sample::new(key_expr, plugin.path()))).res() - { + if let Err(e) = query.reply(key_expr, plugin.path()).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -743,7 +735,7 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(response.key) { match Payload::try_from(response.value) { Ok(zbuf) => { - if let Err(e) = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync() { + if let Err(e) = query.reply(key_expr, zbuf).res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); } }, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index bd5ec81101..ed3bd63b6a 100644 --- a/zenoh/src/queryable.rs +++ 
b/zenoh/src/queryable.rs @@ -14,9 +14,12 @@ //! Queryable primitives. +use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; +use crate::sample::QoS; +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -26,6 +29,7 @@ use std::fmt; use std::future::Ready; use std::ops::Deref; use std::sync::Arc; +use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{EntityId, WireExpr}; use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; @@ -96,6 +100,42 @@ impl Query { pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } + /// Sends a reply in the form of [`Sample`] to this Query. + /// + /// By default, queries only accept replies whose key expression intersects with the query's. + /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), + /// replying on a disjoint key expression will result in an error when resolving the reply. + /// This api is for internal use only. + #[inline(always)] + #[cfg(feature = "unstable")] + #[doc(hidden)] + pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_> { + let Sample { + key_expr, + payload, + kind, + encoding, + timestamp, + qos, + #[cfg(feature = "unstable")] + source_info, + #[cfg(feature = "unstable")] + attachment, + } = sample; + ReplyBuilder { + query: self, + key_expr, + payload, + kind, + encoding, + timestamp, + qos, + #[cfg(feature = "unstable")] + source_info, + #[cfg(feature = "unstable")] + attachment, + } + } /// Sends a reply to this Query. /// @@ -103,10 +143,64 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. 
#[inline(always)] - pub fn reply(&self, result: Result) -> ReplyBuilder<'_> { + pub fn reply( + &self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + ) -> ReplyBuilder<'_> + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { ReplyBuilder { query: self, - result, + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + timestamp: None, + encoding: Encoding::default(), + qos: response::ext::QoSType::RESPONSE.into(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } + } + /// Sends a error reply to this Query. + /// + #[inline(always)] + pub fn reply_err(&self, value: IntoValue) -> ReplyErrBuilder<'_> + where + IntoValue: Into, + { + ReplyErrBuilder { + query: self, + value: value.into(), + } + } + + /// Sends a delete reply to this Query. + /// + /// By default, queries only accept replies whose key expression intersects with the query's. + /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), + /// replying on a disjoint key expression will result in an error when resolving the reply. + #[inline(always)] + pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyBuilder<'_> + where + IntoKeyExpr: Into>, + { + ReplyBuilder { + query: self, + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::Delete, + timestamp: None, + encoding: Encoding::default(), + qos: response::ext::QoSType::RESPONSE.into(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, } } @@ -149,25 +243,50 @@ impl fmt::Display for Query { } } -/// A builder returned by [`Query::reply()`](Query::reply). +/// A builder returned by [`Query::reply()`](Query::reply) or [`Query::reply()`](Query::reply). 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] pub struct ReplyBuilder<'a> { query: &'a Query, - result: Result, + key_expr: KeyExpr<'static>, + payload: Payload, + kind: SampleKind, + encoding: Encoding, + timestamp: Option, + qos: QoS, + #[cfg(feature = "unstable")] + source_info: SourceInfo, + #[cfg(feature = "unstable")] + attachment: Option, +} + +/// A builder returned by [`Query::reply_err()`](Query::reply_err). +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, } impl<'a> ReplyBuilder<'a> { - #[allow(clippy::result_large_err)] #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Result { - match &mut self.result { - Ok(sample) => { - sample.attachment = Some(attachment); - Ok(self) - } - Err(_) => Err((self, attachment)), - } + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.source_info = source_info; + self + } + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.timestamp = Some(timestamp); + self + } + + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self } } @@ -177,119 +296,65 @@ impl<'a> Resolvable for ReplyBuilder<'a> { impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { - match self.result { - Ok(sample) => { - if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&sample.key_expr) - { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, 
self.query.key_expr()) - } - let Sample { - key_expr, - payload, - kind, - encoding, - timestamp, - #[cfg(feature = "unstable")] - source_info, - #[cfg(feature = "unstable")] - attachment, - .. - } = sample; - // Use a macro for inferring the proper const extension ID between Put and Del cases - macro_rules! ext_attachment { - () => {{ - #[allow(unused_mut)] - let mut ext_attachment = None; + if !self.query._accepts_any_replies().unwrap_or(false) + && !self.query.key_expr().intersects(&self.key_expr) + { + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", self.key_expr, self.query.key_expr()) + } + #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled + let mut ext_sinfo = None; + #[cfg(feature = "unstable")] + { + if self.source_info.source_id.is_some() || self.source_info.source_sn.is_some() { + ext_sinfo = Some(zenoh::put::ext::SourceInfoType { + id: self.source_info.source_id.unwrap_or_default(), + sn: self.source_info.source_sn.unwrap_or_default() as u32, + }) + } + } + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.key_expr.into()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Reply(zenoh::Reply { + consolidation: zenoh::Consolidation::DEFAULT, + ext_unknown: vec![], + payload: match self.kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: self.timestamp, + encoding: self.encoding.into(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment { - ext_attachment = Some(attachment.into()); - } - } - ext_attachment - }}; - } - #[allow(unused_mut)] - let mut ext_sinfo = None; - #[cfg(feature = "unstable")] - { - if source_info.source_id.is_some() || source_info.source_sn.is_some() { - ext_sinfo = Some(zenoh::put::ext::SourceInfoType { - 
id: source_info.source_id.unwrap_or_default(), - sn: source_info.source_sn.unwrap_or_default() as u32, - }) - } - } - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(key_expr.into()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::DEFAULT, + ext_attachment: self.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, ext_unknown: vec![], - payload: match kind { - SampleKind::Put => ReplyBody::Put(Put { - timestamp, - encoding: encoding.into(), - ext_sinfo, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: ext_attachment!(), - ext_unknown: vec![], - payload: payload.into(), - }), - SampleKind::Delete => ReplyBody::Del(Del { - timestamp, - ext_sinfo, - ext_attachment: ext_attachment!(), - ext_unknown: vec![], - }), - }, + payload: self.payload.into(), }), - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } - Err(payload) => { - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Err(zenoh::Err { - timestamp: None, - is_infrastructure: false, - ext_sinfo: None, + SampleKind::Delete => ReplyBody::Del(Del { + timestamp: self.timestamp, + ext_sinfo, + #[cfg(feature = "unstable")] + ext_attachment: self.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, ext_unknown: vec![], - ext_body: Some(ValueType { - #[cfg(feature = "shared-memory")] - ext_shm: None, - payload: payload.payload.into(), - encoding: payload.encoding.into(), - }), - code: 0, // TODO }), - ext_qos: 
response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } - } + }, + }), + ext_qos: self.qos.into(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) } } @@ -301,6 +366,50 @@ impl<'a> AsyncResolve for ReplyBuilder<'a> { } } +impl<'a> Resolvable for ReplyErrBuilder<'a> { + type To = ZResult<()>; +} + +impl SyncResolve for ReplyErrBuilder<'_> { + fn res_sync(self) -> ::To { + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Err(zenoh::Err { + timestamp: None, + is_infrastructure: false, + ext_sinfo: None, + ext_unknown: vec![], + ext_body: Some(ValueType { + #[cfg(feature = "shared-memory")] + ext_shm: None, + payload: self.value.payload.into(), + encoding: self.value.encoding.into(), + }), + code: 0, // TODO + }), + ext_qos: response::ext::QoSType::RESPONSE, + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) + } +} +impl<'a> AsyncResolve for ReplyErrBuilder<'a> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + pub(crate) struct QueryableState { pub(crate) id: Id, pub(crate) key_expr: WireExpr<'static>, @@ -338,7 +447,7 @@ impl fmt::Debug for QueryableState { /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(Ok(Sample::try_from("key/expression", "value").unwrap())) +/// query.reply(KeyExpr::try_from("key/expression").unwrap(), 
"value") /// .res() /// .await /// .unwrap(); @@ -576,7 +685,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(Ok(Sample::try_from("key/expression", "value").unwrap())) +/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") /// .res() /// .await /// .unwrap(); diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index af4a58956d..9c68b460d9 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -585,3 +585,9 @@ impl From for QoS { QoS { inner: qos } } } + +impl From for QoSType { + fn from(qos: QoS) -> Self { + qos.inner + } +} diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 861acf71de..ba67e173bd 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1852,10 +1852,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .unwrap(); /// async_std::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply(Ok(Sample::try_from( - /// "key/expression", + /// query.reply( + /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).unwrap())).res().await.unwrap(); + /// ).res().await.unwrap(); /// } /// }).await; /// # }) @@ -2481,10 +2481,10 @@ pub trait SessionDeclarations<'s, 'a> { /// .unwrap(); /// async_std::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply(Ok(Sample::try_from( - /// "key/expression", + /// query.reply( + /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).unwrap())).res().await.unwrap(); + /// ).res().await.unwrap(); /// } /// }).await; /// # }) diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 89dd3e231f..0e7c1c0de7 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -73,11 +73,11 @@ fn queries() { attachment.insert(&k, &k); } query - .reply(Ok(Sample::new( + 
.reply( query.key_expr().clone(), query.value().unwrap().payload.clone(), ) - .with_attachment(attachment))) + .with_attachment(attachment) .res() .unwrap(); }) diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 5c96f080f8..82053b4f1d 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -115,12 +115,12 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. Self::Queryable(ke, payload_size) => { let queryable = session.declare_queryable(ke).res_async().await?; - let sample = Sample::try_from(ke.clone(), vec![0u8; *payload_size])?; + let payload = vec![0u8; *payload_size]; loop { futures::select! { query = queryable.recv_async() => { - query?.reply(Ok(sample.clone())).res_async().await?; + query?.reply(KeyExpr::try_from(ke.to_owned())?, payload.clone()).res_async().await?; }, _ = async_std::task::sleep(Duration::from_millis(100)).fuse() => { diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 8a3f4381d2..077c58298d 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -157,23 +157,28 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re c_msgs.fetch_add(1, Ordering::Relaxed); match query.parameters() { "ok_put" => { - let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - rep.kind = SampleKind::Put; task::block_on(async { - ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(query + .reply( + KeyExpr::try_from(key_expr).unwrap(), + vec![0u8; size].to_vec() + ) + .res_async()) + .unwrap() }); } "ok_del" => { - let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - rep.kind = SampleKind::Delete; task::block_on(async { - ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(query + .reply_del(KeyExpr::try_from(key_expr).unwrap()) + .res_async()) + .unwrap() }); } "err" => { let rep = Value::from(vec![0u8; size]); task::block_on(async { - 
ztimeout!(query.reply(Err(rep)).res_async()).unwrap() + ztimeout!(query.reply_err(rep).res_async()).unwrap() }); } _ => panic!("Unknown query parameter"), diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 76910ee5de..def0dffe33 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -196,8 +196,12 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { .declare_queryable(key_expr) .callback(move |sample| { c_msgs1.fetch_add(1, Ordering::Relaxed); - let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + task::block_on(async { + ztimeout!(sample + .reply(KeyExpr::try_from(key_expr).unwrap(), vec![0u8; size]) + .res_async()) + .unwrap() + }); }) .res_async()) .unwrap(); @@ -209,8 +213,12 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { .declare_queryable(key_expr) .callback(move |sample| { c_msgs2.fetch_add(1, Ordering::Relaxed); - let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + task::block_on(async { + ztimeout!(sample + .reply(KeyExpr::try_from(key_expr).unwrap(), vec![0u8; size]) + .res_async()) + .unwrap() + }); }) .res_async()) .unwrap(); From e06b46d4e39b723fb17f9cf6015e07c58b2ec710 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 13 Mar 2024 10:03:45 +0100 Subject: [PATCH 011/598] Simplify Error message (#813) --- commons/zenoh-codec/src/zenoh/err.rs | 57 +++++++++------------ commons/zenoh-protocol/src/zenoh/err.rs | 52 +++++++------------ io/zenoh-transport/src/shm.rs | 28 ++-------- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- zenoh/src/queryable.rs | 19 +++---- zenoh/src/session.rs | 12 ++--- 6 files changed, 56 insertions(+), 114 deletions(-) diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index 5cef1a6389..b459f67b3f 100644 --- 
a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, + ZBuf, }; use zenoh_protocol::{ common::{iext, imsg}, + core::Encoding, zenoh::{ err::{ext, flag, Err}, id, @@ -33,33 +35,26 @@ where fn write(self, writer: &mut W, x: &Err) -> Self::Output { let Err { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, ext_unknown, + payload, } = x; // Header let mut header = id::ERR; - if timestamp.is_some() { - header |= flag::T; + if encoding != &Encoding::empty() { + header |= flag::E; } - if *is_infrastructure { - header |= flag::I; - } - let mut n_exts = - (ext_sinfo.is_some() as u8) + (ext_body.is_some() as u8) + (ext_unknown.len() as u8); + let mut n_exts = (ext_sinfo.is_some() as u8) + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, code)?; - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; + if encoding != &Encoding::empty() { + self.write(&mut *writer, encoding)?; } // Extensions @@ -67,15 +62,15 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if let Some(body) = ext_body.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (body, n_exts != 0))?; - } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } + // Payload + let bodec = Zenoh080Bounded::::new(); + bodec.write(&mut *writer, payload)?; + Ok(()) } } @@ -105,16 +100,13 @@ where } // Body - let code: u16 = self.codec.read(&mut *reader)?; - let is_infrastructure = imsg::has_flag(self.header, flag::I); - let mut timestamp: Option = None; - if 
imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); + let mut encoding = Encoding::empty(); + if imsg::has_flag(self.header, flag::E) { + encoding = self.codec.read(&mut *reader)?; } // Extensions let mut ext_sinfo: Option = None; - let mut ext_body: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -127,11 +119,6 @@ where ext_sinfo = Some(s); has_ext = ext; } - ext::ErrBodyType::VID | ext::ErrBodyType::SID => { - let (s, ext): (ext::ErrBodyType, bool) = eodec.read(&mut *reader)?; - ext_body = Some(s); - has_ext = ext; - } _ => { let (u, ext) = extension::read(reader, "Err", ext)?; ext_unknown.push(u); @@ -140,13 +127,15 @@ where } } + // Payload + let bodec = Zenoh080Bounded::::new(); + let payload: ZBuf = bodec.read(&mut *reader)?; + Ok(Err { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, ext_unknown, + payload, }) } } diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index 648efff441..eacbb26596 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -11,43 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; +use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; -use uhlc::Timestamp; +use zenoh_buffers::ZBuf; /// # Err message /// /// ```text /// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - I: Infrastructure If I==1 then the error is related to the infrastructure else to the user +/// - X: Reserved +/// - E: Encoding If E==1 then the encoding is present /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|I|T| ERR | +/// |Z|E|X| ERR | /// +-+-+-+---------+ -/// % code:z16 % -/// +---------------+ -/// ~ ts: ~ if T==1 +/// ~ encoding ~ if E==1 /// +---------------+ /// ~ [err_exts] ~ if Z==1 /// 
+---------------+ +/// ~ pl: ~ -- Payload +/// +---------------+ /// ``` pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - pub const I: u8 = 1 << 6; // 0x40 Infrastructure if I==1 then the error is related to the infrastructure else to the user + // pub const X: u8 = 1 << 5; // 0x20 Reserved + pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Err { - pub code: u16, - pub is_infrastructure: bool, - pub timestamp: Option, + pub encoding: Encoding, pub ext_sinfo: Option, - pub ext_body: Option, pub ext_unknown: Vec, + pub payload: ZBuf, } pub mod ext { @@ -57,45 +55,31 @@ pub mod ext { /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - - /// # ErrBody extension - /// Used to carry a body attached to the query - /// Shared Memory extension is automatically defined by ValueType extension if - /// #[cfg(feature = "shared-memory")] is defined. 
- pub type ErrBodyType = crate::zenoh::ext::ValueType<{ ZExtZBuf::<0x02>::id(false) }, 0x03>; } impl Err { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; + use crate::common::iext; use rand::Rng; let mut rng = rand::thread_rng(); - let code: u16 = rng.gen(); - let is_infrastructure = rng.gen_bool(0.5); - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); + let encoding = Encoding::rand(); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_body = rng.gen_bool(0.5).then_some(ext::ErrBodyType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::ErrBodyType::SID) + 1, + iext::mid(ext::SourceInfo::ID) + 1, false, )); } + let payload = ZBuf::rand(rng.gen_range(0..=64)); Self { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, ext_unknown, + payload, } } } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 6f98cafc14..31910f51ae 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -18,7 +18,7 @@ use zenoh_core::{zasyncread, zasyncwrite, zerror}; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{ - err::{ext::ErrBodyType, Err}, + err::Err, ext::ShmType, query::{ext::QueryBodyType, Query}, reply::ReplyBody, @@ -123,31 +123,11 @@ impl MapShm for Reply { // Impl - Err impl MapShm for Err { fn map_to_shminfo(&mut self) -> ZResult { - if let Self { - ext_body: Some(ErrBodyType { - payload, ext_shm, .. - }), - .. - } = self - { - map_to_shminfo!(payload, ext_shm) - } else { - Ok(false) - } + Ok(false) } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { - if let Self { - ext_body: Some(ErrBodyType { - payload, ext_shm, .. - }), - .. 
- } = self - { - map_to_shmbuf!(payload, ext_shm, shmr) - } else { - Ok(false) - } + fn map_to_shmbuf(&mut self, _shmr: &RwLock) -> ZResult { + Ok(false) } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 287621151a..721a98b8c2 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -521,7 +521,7 @@ macro_rules! inc_res_stats { ResponseBody::Err(e) => { stats.[<$txrx _z_reply_msgs>].[](1); stats.[<$txrx _z_reply_pl_bytes>].[]( - e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), + e.payload.len() ); } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index ed3bd63b6a..d98df046b7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -31,9 +31,11 @@ use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::core::{EntityId, WireExpr}; -use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; -use zenoh_protocol::zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}; +use zenoh_protocol::{ + core::{EntityId, WireExpr}, + network::{response, Mapping, RequestId, Response, ResponseFinal}, + zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, +}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -380,17 +382,10 @@ impl SyncResolve for ReplyErrBuilder<'_> { mapping: Mapping::Sender, }, payload: ResponseBody::Err(zenoh::Err { - timestamp: None, - is_infrastructure: false, + encoding: self.value.encoding.into(), ext_sinfo: None, ext_unknown: vec![], - ext_body: Some(ValueType { - #[cfg(feature = "shared-memory")] - ext_shm: None, - payload: self.value.payload.into(), - encoding: self.value.encoding.into(), - }), - code: 0, // TODO + payload: self.value.payload.into(), }), ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, diff --git a/zenoh/src/session.rs 
b/zenoh/src/session.rs index ba67e173bd..4c303ae974 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2128,15 +2128,9 @@ impl Primitives for Session { Some(query) => { let callback = query.callback.clone(); std::mem::drop(state); - let value = match e.ext_body { - Some(body) => Value { - payload: body.payload.into(), - encoding: body.encoding.into(), - }, - None => Value { - payload: Payload::empty(), - encoding: Encoding::default(), - }, + let value = Value { + payload: e.payload.into(), + encoding: e.encoding.into(), }; let replier_id = match e.ext_sinfo { Some(info) => info.id.zid, From 55119a5001f0080bfd78a91733760a45a959674c Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Wed, 13 Mar 2024 15:07:59 +0100 Subject: [PATCH 012/598] make Sample ields pub(crate) provide accessors for external users --- examples/examples/z_get.rs | 4 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pull.rs | 6 +- examples/examples/z_storage.rs | 15 ++- examples/examples/z_sub.rs | 4 +- plugins/zenoh-plugin-example/src/lib.rs | 6 +- plugins/zenoh-plugin-rest/src/lib.rs | 22 ++--- .../src/replica/align_queryable.rs | 10 +- .../src/replica/aligner.rs | 16 ++-- .../src/replica/mod.rs | 11 ++- .../src/replica/storage.rs | 84 +++++++++-------- .../tests/operations.rs | 8 +- .../tests/wildcard.rs | 20 ++-- zenoh-ext/examples/z_query_sub.rs | 4 +- zenoh-ext/src/group.rs | 4 +- zenoh-ext/src/publication_cache.rs | 8 +- zenoh-ext/src/querying_subscriber.rs | 4 +- zenoh/src/liveliness.rs | 14 +-- zenoh/src/payload.rs | 4 +- zenoh/src/sample.rs | 93 +++++++++++++------ zenoh/src/subscriber.rs | 12 +-- zenoh/tests/attachments.rs | 4 +- zenoh/tests/events.rs | 20 ++-- zenoh/tests/interceptors.rs | 6 +- zenoh/tests/liveliness.rs | 8 +- zenoh/tests/qos.rs | 4 +- zenoh/tests/routing.rs | 4 +- zenoh/tests/session.rs | 10 +- zenoh/tests/unicity.rs | 6 +- 29 files changed, 224 insertions(+), 189 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs 
index 0fff95c250..dce74d367b 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -41,12 +41,12 @@ async fn main() { match reply.sample { Ok(sample) => { let payload = sample - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( ">> Received ('{}': '{}')", - sample.key_expr.as_str(), + sample.key_expr().as_str(), payload, ); } diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 1f06c7abb9..6c333cbbeb 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -41,7 +41,7 @@ fn main() { let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.payload).res().unwrap()) + .callback(move |sample| publisher.put(sample.payload().clone()).res().unwrap()) .res() .unwrap(); for _ in stdin().bytes().take_while(|b| !matches!(b, Ok(b'q'))) {} diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index ed2a90f1a6..5ba4f413bd 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -45,13 +45,13 @@ async fn main() { let subs = async { while let Ok(sample) = subscriber.recv_async().await { let payload = sample - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, - sample.key_expr.as_str(), + sample.kind(), + sample.key_expr().as_str(), payload, ); } diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 857181751b..ab62785f18 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -53,13 +53,12 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(),payload); - if sample.kind == SampleKind::Delete { - 
stored.remove(&sample.key_expr.to_string()); - } else { - stored.insert(sample.key_expr.to_string(), sample); - } + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(),payload); + match sample.kind() { + SampleKind::Delete => stored.remove(&sample.key_expr().to_string()), + SampleKind::Put => stored.insert(sample.key_expr().to_string(), sample), + }; }, query = queryable.recv_async() => { @@ -67,7 +66,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - query.reply(sample.key_expr.clone(), sample.payload.clone()).res().await.unwrap(); + query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); } } }, diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 195e2f7640..f2d337a7cf 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -46,8 +46,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(), payload); }, _ = stdin.read_exact(&mut input).fuse() => { match input[0] { diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 12cc6ffa84..04f49b4739 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -164,9 +164,9 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample 
received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - info!("Received data ('{}': '{}')", sample.key_expr, payload); - stored.insert(sample.key_expr.to_string(), sample); + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + info!("Received data ('{}': '{}')", sample.key_expr(), payload); + stored.insert(sample.key_expr().to_string(), sample); }, // on query received by the Queryable query = queryable.recv_async() => { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 1a99d7b5a4..c689bc7d7d 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -46,7 +46,7 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; -fn payload_to_json(payload: Payload) -> String { +fn payload_to_json(payload: &Payload) -> String { payload .deserialize::() .unwrap_or_else(|_| format!(r#""{}""#, b64_std_engine.encode(payload.contiguous()))) @@ -55,10 +55,10 @@ fn payload_to_json(payload: Payload) -> String { fn sample_to_json(sample: Sample) -> String { format!( r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, - sample.key_expr.as_str(), - payload_to_json(sample.payload), - sample.encoding, - if let Some(ts) = sample.timestamp { + sample.key_expr().as_str(), + payload_to_json(sample.payload()), + sample.encoding(), + if let Some(ts) = sample.timestamp() { ts.to_string() } else { "None".to_string() @@ -72,7 +72,7 @@ fn result_to_json(sample: Result) -> String { Err(err) => { format!( r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - payload_to_json(err.payload), + payload_to_json(&err.payload), err.encoding, ) } @@ -100,8 +100,8 @@ async fn to_json_response(results: flume::Receiver) -> Response { fn sample_to_html(sample: Sample) -> String { format!( "
{}
\n
{}
\n", - sample.key_expr.as_str(), - String::from_utf8_lossy(&sample.payload.contiguous()) + sample.key_expr().as_str(), + String::from_utf8_lossy(&sample.payload().contiguous()) ) } @@ -136,8 +136,8 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(reply) => match reply.sample { Ok(sample) => response( StatusCode::Ok, - Cow::from(&sample.encoding).as_ref(), - String::from_utf8_lossy(&sample.payload.contiguous()).as_ref(), + Cow::from(sample.encoding()).as_ref(), + String::from_utf8_lossy(&sample.payload().contiguous()).as_ref(), ), Err(value) => response( StatusCode::Ok, @@ -322,7 +322,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", - sample.key_expr.as_str(), - StringOrBase64::from(sample.payload.clone()) + sample.key_expr().as_str(), + StringOrBase64::from(sample.payload()) ); - if let Some(timestamp) = sample.timestamp { + if let Some(timestamp) = sample.timestamp() { match timestamp.cmp(&logentry.timestamp) { Ordering::Greater => return None, Ordering::Less => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 03c6fa949a..b11a94e4f2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -141,10 +141,10 @@ impl Aligner { for sample in replies { result.insert( - sample.key_expr.into(), + sample.key_expr().clone().into(), ( - sample.timestamp.unwrap(), - Value::new(sample.payload).with_encoding(sample.encoding), + sample.timestamp().unwrap().clone(), + Value::from(sample), ), ); } @@ -213,7 +213,7 @@ impl Aligner { let mut other_intervals: HashMap = HashMap::new(); // expecting sample.payload to be a vec of intervals with their checksum for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload)) { + match 
serde_json::from_str(&StringOrBase64::from(each.payload())) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -259,7 +259,7 @@ impl Aligner { let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload)) { + match serde_json::from_str(&StringOrBase64::from(each.payload())) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -300,7 +300,7 @@ impl Aligner { let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload)) { + match serde_json::from_str(&StringOrBase64::from(each.payload())) { Ok((i, c)) => { other_content.insert(i, c); } @@ -340,8 +340,8 @@ impl Aligner { Ok(sample) => { log::trace!( "[ALIGNER] Received ('{}': '{}')", - sample.key_expr.as_str(), - StringOrBase64::from(sample.payload.clone()) + sample.key_expr().as_str(), + StringOrBase64::from(sample.payload()) ); return_val.push(sample); } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 78254213f7..5dda032029 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -220,16 +220,17 @@ impl Replica { continue; } }; - let from = &sample.key_expr.as_str() + let from = &sample.key_expr().as_str() [Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; log::trace!( "[DIGEST_SUB] From {} Received {} ('{}': '{}')", from, - sample.kind, - sample.key_expr.as_str(), - StringOrBase64::from(sample.payload.clone()) + sample.kind(), + sample.key_expr().as_str(), + StringOrBase64::from(sample.payload()) ); - let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload)) { + let digest: 
Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload())) + { Ok(digest) => digest, Err(e) => { log::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 6b48895612..895f2e1914 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -179,7 +179,7 @@ impl StorageService { }; // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored - if sample.get_timestamp().is_none() { + if sample.timestamp().is_none() { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { @@ -271,28 +271,28 @@ impl StorageService { }; // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(&sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.get_timestamp().unwrap()) + .is_deleted(&k.clone(), sample.timestamp().unwrap()) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) + && self.is_latest(&k, sample.timestamp().unwrap()).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", @@ -302,30 +302,30 @@ impl StorageService { // there might be the 
case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage let sample_to_store = match self - .ovderriding_wild_update(&k, sample.get_timestamp().unwrap()) + .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { Some(overriding_update) => { let Value { payload, encoding, .. } = overriding_update.data.value; - let mut sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) + let sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) - .with_timestamp(overriding_update.data.timestamp); - sample_to_store.kind = overriding_update.kind; + .with_timestamp(overriding_update.data.timestamp) + .with_kind(overriding_update.kind); sample_to_store } None => { - let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.payload.clone()) - .with_encoding(sample.encoding.clone()) - .with_timestamp(sample.timestamp.unwrap()); - sample_to_store.kind = sample.kind; + let sample_to_store = + Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) + .with_encoding(sample.encoding().clone()) + .with_timestamp(sample.timestamp().unwrap().clone()) + .with_kind(sample.kind()); sample_to_store } }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -333,24 +333,25 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = if sample.kind == SampleKind::Put { - storage - .put( - stripped_key, - Value::new(sample_to_store.payload.clone()) - .with_encoding(sample_to_store.encoding.clone()), - sample_to_store.timestamp.unwrap(), - ) - .await - } else if sample.kind == SampleKind::Delete { - // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp.unwrap()) - .await; - storage - 
.delete(stripped_key, sample_to_store.timestamp.unwrap()) - .await - } else { - Err("sample kind not implemented".into()) + let result = match sample.kind() { + SampleKind::Put => { + storage + .put( + stripped_key, + Value::new(sample_to_store.payload().clone()) + .with_encoding(sample_to_store.encoding().clone()), + sample_to_store.timestamp().unwrap().clone(), + ) + .await + } + SampleKind::Delete => { + // register a tombstone + self.mark_tombstone(&k, sample_to_store.timestamp().unwrap().clone()) + .await; + storage + .delete(stripped_key, sample_to_store.timestamp().unwrap().clone()) + .await + } }; drop(storage); if self.replication.is_some() @@ -362,7 +363,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.get_timestamp().unwrap())); + .send((k.clone(), sample_to_store.timestamp().unwrap().clone())); match sending { Ok(_) => (), Err(e) => { @@ -395,15 +396,16 @@ impl StorageService { async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.clone().key_expr; + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; + let timestamp = sample.timestamp().unwrap().clone(); wildcards.insert( &key, Update { - kind: sample.kind, + kind: sample.kind(), data: StoredData { - value: Value::new(sample.payload).with_encoding(sample.encoding), - timestamp: sample.timestamp.unwrap(), + value: Value::from(sample), + timestamp, }, }, ); diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 81029e2fa7..36162f01c2 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -101,7 +101,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/a").await; assert_eq!(data.len(), 1); - 
assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "1"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "1"); put_data( &session, @@ -117,7 +117,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); delete_data( &session, @@ -136,8 +136,8 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); - assert_eq!(data[0].key_expr.as_str(), "operation/test/b"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(data[0].key_expr().as_str(), "operation/test/b"); drop(storage); } diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 4808ec246f..5a71dc23f0 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -117,8 +117,8 @@ async fn test_wild_card_in_order() { // expected single entry let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); - assert_eq!(data[0].key_expr.as_str(), "wild/test/a"); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); + assert_eq!(data[0].key_expr().as_str(), "wild/test/a"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); put_data( &session, @@ -134,10 +134,10 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - 
assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload.clone()).as_str())); - assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload.clone()).as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload()).as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload()).as_str())); put_data( &session, @@ -153,10 +153,10 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "4"); - assert_eq!(StringOrBase64::from(data[1].payload.clone()).as_str(), "4"); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "4"); + assert_eq!(StringOrBase64::from(data[1].payload()).as_str(), "4"); delete_data( &session, diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 80efc0854f..8c1307d712 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -60,8 +60,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), 
sample.key_expr().as_str(), payload); }, _ = stdin.read_exact(&mut input).fuse() => { diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 75a435e8f4..41007d8b87 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -248,7 +248,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.payload.contiguous())) { + match bincode::deserialize::(&(s.payload().contiguous())) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -308,7 +308,7 @@ async fn net_event_handler(z: Arc, state: Arc) { match reply.sample { Ok(sample) => { match bincode::deserialize::( - &sample.payload.contiguous(), + &sample.payload().contiguous(), ) { Ok(m) => { let mut expiry = Instant::now(); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 1c9a286800..85cb96cce2 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -180,9 +180,9 @@ impl<'a> PublicationCache<'a> { sample = sub_recv.recv_async() => { if let Ok(sample) = sample { let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { - prefix.join(&sample.key_expr).unwrap().into() + prefix.join(sample.key_expr()).unwrap().into() } else { - sample.key_expr.clone() + sample.key_expr().clone() }; if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { @@ -207,7 +207,7 @@ impl<'a> PublicationCache<'a> { if !query.selector().key_expr.as_str().contains('*') { if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } @@ -221,7 +221,7 @@ impl<'a> 
PublicationCache<'a> { for (key_expr, queue) in cache.iter() { if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 2c89ec82ae..470f795f2b 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -304,8 +304,8 @@ impl MergeQueue { } fn push(&mut self, sample: Sample) { - if let Some(ts) = sample.timestamp { - self.timstamped.entry(ts).or_insert(sample); + if let Some(ts) = sample.timestamp() { + self.timstamped.entry(ts.clone()).or_insert(sample); } else { self.untimestamped.push_back(sample); } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 9f14866363..d4229db4cc 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -131,9 +131,9 @@ impl<'a> Liveliness<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session.liveliness().declare_subscriber("key/expression").res().await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// match sample.kind { - /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr), - /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr), + /// match sample.kind() { + /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), + /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), /// } /// } /// # }) @@ -169,7 +169,7 @@ impl<'a> Liveliness<'a> { /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// if let 
Ok(sample) = reply.sample { - /// println!(">> Liveliness token {}", sample.key_expr); + /// println!(">> Liveliness token {}", sample.key_expr()); /// } /// } /// # }) @@ -425,7 +425,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .res() /// .await /// .unwrap(); @@ -499,7 +499,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {:?}", sample.key_expr, sample.payload); + /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # }) /// ``` @@ -593,7 +593,7 @@ where /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { -/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str()), +/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), /// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), /// } /// } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index f499db50da..62f40f9294 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -579,8 +579,8 @@ impl std::fmt::Display for StringOrBase64 { } } -impl From for StringOrBase64 { - fn from(v: Payload) -> Self { +impl From<&Payload> for StringOrBase64 { + fn from(v: &Payload) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; match v.deserialize::() { Ok(s) => StringOrBase64::String(s), diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 9c68b460d9..1ac04313ab 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -363,38 +363,18 @@ pub use 
attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[non_exhaustive] #[derive(Clone, Debug)] pub struct Sample { - /// The key expression on which this Sample was published. - pub key_expr: KeyExpr<'static>, - /// The payload of this Sample. - pub payload: Payload, - /// The kind of this Sample. - pub kind: SampleKind, - /// The encoding of this sample - pub encoding: Encoding, - /// The [`Timestamp`] of this Sample. - pub timestamp: Option, - /// Quality of service settings this sample was sent with. - pub qos: QoS, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) payload: Payload, + pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + pub(crate) qos: QoS, #[cfg(feature = "unstable")] - ///
- /// 🔬 - /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. - /// To use it, you must enable zenoh's unstable feature flag. - ///
- /// - /// Infos on the source of this Sample. - pub source_info: SourceInfo, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - ///
- /// 🔬 - /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. - /// To use it, you must enable zenoh's unstable feature flag. - ///
- /// - /// A map of key-value pairs, where each key and value are byte-slices. - pub attachment: Option, + pub(crate) attachment: Option, } impl Sample { @@ -471,19 +451,67 @@ impl Sample { self } + /// Gets the key expression on which this Sample was published. + #[inline] + pub fn key_expr(&self) -> &KeyExpr<'static> { + &self.key_expr + } + + /// Gets the payload of this Sample. + #[inline] + pub fn payload(&self) -> &Payload { + &self.payload + } + + /// Gets the kind of this Sample. + #[inline] + pub fn kind(&self) -> SampleKind { + self.kind + } + + /// Sets the kind of this Sample. + #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] + pub fn with_kind(mut self, kind: SampleKind) -> Self { + self.kind = kind; + self + } + + /// Gets the encoding of this sample + #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } + /// Gets the timestamp of this Sample. #[inline] - pub fn get_timestamp(&self) -> Option<&Timestamp> { + pub fn timestamp(&self) -> Option<&Timestamp> { self.timestamp.as_ref() } /// Sets the timestamp of this Sample. #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { self.timestamp = Some(timestamp); self } + /// Gets the quality of service settings this Sample was sent with. + #[inline] + pub fn qos(&self) -> &QoS { + &self.qos + } + + /// Gets infos on the source of this Sample. + #[zenoh_macros::unstable] + #[inline] + pub fn source_info(&self) -> &SourceInfo { + &self.source_info + } + /// Sets the source info of this Sample. #[zenoh_macros::unstable] #[inline] @@ -506,17 +534,22 @@ impl Sample { } } + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. #[zenoh_macros::unstable] + #[inline] pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } + /// Gets the mutable sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
#[zenoh_macros::unstable] + #[inline] pub fn attachment_mut(&mut self) -> &mut Option { &mut self.attachment } - #[allow(clippy::result_large_err)] + #[inline] + #[doc(hidden)] #[zenoh_macros::unstable] pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index e276d0c6d0..d4c3257472 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -67,7 +67,7 @@ impl fmt::Debug for SubscriberState { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload) }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) }) /// .res() /// .await /// .unwrap(); @@ -100,7 +100,7 @@ pub(crate) struct SubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .pull_mode() /// .res() /// .await @@ -123,7 +123,7 @@ impl<'a> PullSubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .pull_mode() /// .res() /// .await @@ -332,7 +332,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { 
println!("Received: {} {:?}", sample.key_expr, sample.payload); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .res() /// .await /// .unwrap(); @@ -407,7 +407,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {:?}", sample.key_expr, sample.payload); + /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # }) /// ``` @@ -636,7 +636,7 @@ where /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { -/// println!("Received: {} {:?}", sample.key_expr, sample.payload); +/// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # }) /// ``` diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 0e7c1c0de7..38d03b0a84 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -9,9 +9,9 @@ fn pubsub() { .callback(|sample| { println!( "{}", - std::str::from_utf8(&sample.payload.contiguous()).unwrap() + std::str::from_utf8(&sample.payload().contiguous()).unwrap() ); - for (k, v) in &sample.attachment.unwrap() { + for (k, v) in sample.attachment().unwrap() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 0ea775784a..5823b16150 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -69,15 +69,15 @@ fn zenoh_events() { let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Put); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); - 
let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Put); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); let replies: Vec = ztimeout!(session .get(format!("@/session/{zid}/transport/unicast/*")) @@ -87,7 +87,7 @@ fn zenoh_events() { .collect(); assert!(replies.len() == 1); assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); let replies: Vec = ztimeout!(session @@ -98,22 +98,22 @@ fn zenoh_events() { .collect(); assert!(replies.len() == 1); assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); close_session(session2).await; let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); 
sub2.undeclare().res().await.unwrap(); sub1.undeclare().res().await.unwrap(); diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 2a5c30e7b8..1f502138e4 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -83,9 +83,9 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .callback(move |sample| { let mut count = zlock!(total_count_clone); *count += 1; - if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r100" { + if sample.key_expr().as_str() == "test/downsamples_by_keyexp/r100" { zlock!(counter_r100).tick(); - } else if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r50" { + } else if sample.key_expr().as_str() == "test/downsamples_by_keyexp/r50" { zlock!(counter_r50).tick(); } }) @@ -191,7 +191,7 @@ fn downsampling_by_interface_impl(egress: bool) { .callback(move |sample| { let mut count = zlock!(total_count_clone); *count += 1; - if sample.key_expr.as_str() == "test/downsamples_by_interface/r100" { + if sample.key_expr().as_str() == "test/downsamples_by_interface/r100" { zlock!(counter_r100).tick(); } }) diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 96cca533df..c55eed4bc4 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -72,14 +72,14 @@ fn zenoh_liveliness() { .res_async()) .unwrap(); let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); - assert!(sample.kind == SampleKind::Put); - assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); assert!(ztimeout!(replies.recv_async()).is_err()); let sample = ztimeout!(sub.recv_async()).unwrap(); - assert!(sample.kind == SampleKind::Put); - assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); drop(token); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs 
index 475d8d7a1b..24119e7b1e 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -52,13 +52,13 @@ fn pubsub() { task::sleep(SLEEP).await; ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); assert_eq!(qos.priority(), Priority::DataHigh); assert_eq!(qos.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); assert_eq!(qos.priority(), Priority::DataLow); assert_eq!(qos.congestion_control(), CongestionControl::Block); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 82053b4f1d..06a8f5da45 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -58,7 +58,7 @@ impl Task { let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; let mut counter = 0; while let Ok(sample) = sub.recv_async().await { - let recv_size = sample.payload.len(); + let recv_size = sample.payload().len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -91,7 +91,7 @@ impl Task { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - let recv_size = sample.payload.len(); + let recv_size = sample.payload().len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 077c58298d..e3f5e2df63 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -95,7 +95,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re let sub = ztimeout!(peer01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload.len(), size); + assert_eq!(sample.payload().len(), 
size); c_msgs.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -198,8 +198,8 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); - assert_eq!(s.kind, SampleKind::Put); - assert_eq!(s.payload.len(), size); + assert_eq!(s.kind(), SampleKind::Put); + assert_eq!(s.payload().len(), size); cnt += 1; } } @@ -216,8 +216,8 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); - assert_eq!(s.kind, SampleKind::Delete); - assert_eq!(s.payload.len(), 0); + assert_eq!(s.kind(), SampleKind::Delete); + assert_eq!(s.payload().len(), 0); cnt += 1; } } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index def0dffe33..8eb007b0c0 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -114,7 +114,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub1 = ztimeout!(s01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload.len(), size); + assert_eq!(sample.payload().len(), size); c_msgs1.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -126,7 +126,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub2 = ztimeout!(s02 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload.len(), size); + assert_eq!(sample.payload().len(), size); c_msgs2.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -232,7 +232,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { let rs = ztimeout!(s03.get(key_expr).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().payload.len(), size); + 
assert_eq!(s.sample.unwrap().payload().len(), size); cnt += 1; } } From cc68ffb8f0f3d8b429ffcdab6230d1a5cbb79a8a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 13 Mar 2024 15:10:45 +0100 Subject: [PATCH 013/598] remove Publisher::write (#819) * build plugins with default zenoh features * update documentation to the new api for keformat's generated Parsed (#783) * fix: Relax dependency requirements (#758) - async-io - unix-named-pipe - filepath - advisory-lock * feat: Improve release workflow (#756) * wip: Improve Release workflow * feat: Add DockerHub & GHCR releases * feat: Refactor checks and tests into pre-release workflow * chore: Remove crates_check.sh and crates_publish.sh * fix: Remove Dockerfile * restore SN in case of frame drops caused by congestion control (#815) * remove Publisher::write * test fix * remove unrelated changes added by rebasing --------- Co-authored-by: Pierre Avital Co-authored-by: Mahmoud Mazouz Co-authored-by: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> --- zenoh/src/publication.rs | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 2a1a58ebd9..f12842d081 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -350,25 +350,6 @@ impl<'a> Publisher<'a> { } } - /// Send data with [`kind`](SampleKind) (Put or Delete). - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.write(SampleKind::Put, "payload").res().await.unwrap(); - /// # }) - /// ``` - pub fn write(&self, kind: SampleKind, value: IntoPayload) -> Publication - where - IntoPayload: Into, - { - self._write(kind, value.into()) - } - /// Put data. 
/// /// # Examples @@ -1451,11 +1432,17 @@ mod tests { let session = open(Config::default()).res().unwrap(); let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); let pub_ = session.declare_publisher(KEY_EXPR).res().unwrap(); - pub_.write(kind, VALUE).res().unwrap(); + + match kind { + SampleKind::Put => pub_.put(VALUE).res().unwrap(), + SampleKind::Delete => pub_.delete().res().unwrap(), + } let sample = sub.recv().unwrap(); assert_eq!(sample.kind, kind); - assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); + if let SampleKind::Put = kind { + assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); + } } sample_kind_integrity_in_publication_with(SampleKind::Put); From f3af52ac0f1787d3eff29ef82c5f00e695c249e2 Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Wed, 13 Mar 2024 15:17:37 +0100 Subject: [PATCH 014/598] format and clippy --- examples/examples/z_get_liveliness.rs | 2 +- examples/examples/z_sub_liveliness.rs | 6 ++-- .../src/replica/align_queryable.rs | 5 +--- .../src/replica/aligner.rs | 5 +--- .../src/replica/storage.rs | 29 ++++++++----------- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/tests/qos.rs | 4 +-- 7 files changed, 21 insertions(+), 32 deletions(-) diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 036dc0ab98..66de570356 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -37,7 +37,7 @@ async fn main() { .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { - Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr.as_str(),), + Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), Err(err) => { let payload = err .payload diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 52ba53875c..02e2e71ba4 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -46,13 +46,13 @@ async fn 
main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - match sample.kind { + match sample.kind() { SampleKind::Put => println!( ">> [LivelinessSubscriber] New alive token ('{}')", - sample.key_expr.as_str()), + sample.key_expr().as_str()), SampleKind::Delete => println!( ">> [LivelinessSubscriber] Dropped token ('{}')", - sample.key_expr.as_str()), + sample.key_expr().as_str()), } }, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index fc361d77f2..32be4a5534 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -180,10 +180,7 @@ impl AlignQueryable { let entry = entry.unwrap(); result.push(AlignData::Data( OwnedKeyExpr::from(entry.key_expr().clone()), - ( - Value::from(entry), - each.timestamp, - ), + (Value::from(entry), each.timestamp), )); } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index b11a94e4f2..fb46b78082 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -142,10 +142,7 @@ impl Aligner { for sample in replies { result.insert( sample.key_expr().clone().into(), - ( - sample.timestamp().unwrap().clone(), - Value::from(sample), - ), + (*sample.timestamp().unwrap(), Value::from(sample)), ); } (result, no_err) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 895f2e1914..0708dcabd9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -276,7 +276,7 @@ impl StorageService { } let matching_keys = if sample.key_expr().is_wild() { - 
self.get_matching_keys(&sample.key_expr()).await + self.get_matching_keys(sample.key_expr()).await } else { vec![sample.key_expr().clone().into()] }; @@ -309,20 +309,15 @@ impl StorageService { let Value { payload, encoding, .. } = overriding_update.data.value; - let sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) + Sample::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) .with_timestamp(overriding_update.data.timestamp) - .with_kind(overriding_update.kind); - sample_to_store - } - None => { - let sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) - .with_encoding(sample.encoding().clone()) - .with_timestamp(sample.timestamp().unwrap().clone()) - .with_kind(sample.kind()); - sample_to_store + .with_kind(overriding_update.kind) } + None => Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) + .with_encoding(sample.encoding().clone()) + .with_timestamp(*sample.timestamp().unwrap()) + .with_kind(sample.kind()), }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { @@ -340,16 +335,16 @@ impl StorageService { stripped_key, Value::new(sample_to_store.payload().clone()) .with_encoding(sample_to_store.encoding().clone()), - sample_to_store.timestamp().unwrap().clone(), + *sample_to_store.timestamp().unwrap(), ) .await } SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp().unwrap().clone()) + self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) .await; storage - .delete(stripped_key, sample_to_store.timestamp().unwrap().clone()) + .delete(stripped_key, *sample_to_store.timestamp().unwrap()) .await } }; @@ -363,7 +358,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), sample_to_store.timestamp().unwrap().clone())); + .send((k.clone(), *sample_to_store.timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -398,7 +393,7 @@ impl StorageService { // @TODO: change into a better 
store that does incremental writes let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - let timestamp = sample.timestamp().unwrap().clone(); + let timestamp = *sample.timestamp().unwrap(); wildcards.insert( &key, Update { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 470f795f2b..480e490fdd 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -305,7 +305,7 @@ impl MergeQueue { fn push(&mut self, sample: Sample) { if let Some(ts) = sample.timestamp() { - self.timstamped.entry(ts.clone()).or_insert(sample); + self.timstamped.entry(*ts).or_insert(sample); } else { self.untimestamped.push_back(sample); } diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 24119e7b1e..1a9df306b2 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -52,13 +52,13 @@ fn pubsub() { task::sleep(SLEEP).await; ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); + let qos = *ztimeout!(subscriber.recv_async()).unwrap().qos(); assert_eq!(qos.priority(), Priority::DataHigh); assert_eq!(qos.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); + let qos = *ztimeout!(subscriber.recv_async()).unwrap().qos(); assert_eq!(qos.priority(), Priority::DataLow); assert_eq!(qos.congestion_control(), CongestionControl::Block); From 0ca41e817044e80a6c422122f46aa3e60821ce64 Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Wed, 13 Mar 2024 15:26:19 +0100 Subject: [PATCH 015/598] mark remaining sample-mutating methods as unstable and hidden --- zenoh/src/sample.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 1ac04313ab..9b9c55822e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -520,10 
+520,12 @@ impl Sample { self } - #[inline] /// Ensure that an associated Timestamp is present in this Sample. /// If not, a new one is created with the current system time and 0x00 as id. /// Get the timestamp of this sample (either existing one or newly created) + #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] pub fn ensure_timestamp(&mut self) -> &Timestamp { if let Some(ref timestamp) = self.timestamp { timestamp @@ -542,8 +544,9 @@ impl Sample { } /// Gets the mutable sample attachment: a map of key-value pairs, where each key and value are byte-slices. - #[zenoh_macros::unstable] #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] pub fn attachment_mut(&mut self) -> &mut Option { &mut self.attachment } From ea7179f789dba510c0e2070188a374768850c76e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 14 Mar 2024 12:21:03 +0100 Subject: [PATCH 016/598] Add express support in the pipeline --- commons/zenoh-protocol/src/network/mod.rs | 16 +++++++++++++-- io/zenoh-transport/src/common/pipeline.rs | 24 ++++++++++++++--------- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 6af7fef243..0e198ddf0f 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -110,6 +110,18 @@ impl NetworkMessage { true } + #[inline] + pub fn is_express(&self) -> bool { + match &self.body { + NetworkBody::Push(msg) => msg.ext_qos.is_express(), + NetworkBody::Request(msg) => msg.ext_qos.is_express(), + NetworkBody::Response(msg) => msg.ext_qos.is_express(), + NetworkBody::ResponseFinal(msg) => msg.ext_qos.is_express(), + NetworkBody::Declare(msg) => msg.ext_qos.is_express(), + NetworkBody::OAM(msg) => msg.ext_qos.is_express(), + } + } + #[inline] pub fn is_droppable(&self) -> bool { if !self.is_reliable() { @@ -117,11 +129,11 @@ impl NetworkMessage { } let cc = match &self.body { - NetworkBody::Declare(msg) => 
msg.ext_qos.get_congestion_control(), NetworkBody::Push(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Request(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Response(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_congestion_control(), + NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::OAM(msg) => msg.ext_qos.get_congestion_control(), }; @@ -131,11 +143,11 @@ impl NetworkMessage { #[inline] pub fn priority(&self) -> Priority { match &self.body { - NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::Push(msg) => msg.ext_qos.get_priority(), NetworkBody::Request(msg) => msg.ext_qos.get_priority(), NetworkBody::Response(msg) => msg.ext_qos.get_priority(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_priority(), + NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::OAM(msg) => msg.ext_qos.get_priority(), } } diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 3968eabdf5..516834fa41 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -161,12 +161,18 @@ impl StageIn { } macro_rules! 
zretok { - ($batch:expr) => {{ - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; + ($batch:expr, $msg:expr) => {{ + if $msg.is_express() { + // Move out existing batch + self.s_out.move_batch($batch); + return true; + } else { + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; + } }}; } @@ -174,7 +180,7 @@ impl StageIn { let mut batch = zgetbatch_rets!(false); // Attempt the serialization on the current batch let e = match batch.encode(&*msg) { - Ok(_) => zretok!(batch), + Ok(_) => zretok!(batch, msg), Err(e) => e, }; @@ -194,7 +200,7 @@ impl StageIn { if let BatchError::NewFrame = e { // Attempt a serialization with a new frame if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch); + zretok!(batch, msg); } } @@ -206,7 +212,7 @@ impl StageIn { // Attempt a second serialization on fully empty batch if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch); + zretok!(batch, msg); } // The second serialization attempt has failed. This means that the message is From 62bf7d3c12d1e4bf56375a6af7a6bd9ebdf8e81a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 14 Mar 2024 12:34:42 +0100 Subject: [PATCH 017/598] Add express support to publisher and put --- zenoh/src/publication.rs | 24 ++++++++++++++++++++++-- zenoh/src/session.rs | 2 ++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f12842d081..75d4ddc2b7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -102,6 +102,13 @@ impl PutBuilder<'_, '_> { self } + /// Change the `congestion_control` to apply when routing the data. 
+ #[inline] + pub fn express(mut self, is_express: bool) -> Self { + self.publisher = self.publisher.express(is_express); + self + } + /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -141,6 +148,7 @@ impl SyncResolve for PutBuilder<'_, '_> { key_expr, congestion_control, priority, + is_express, destination, } = self.publisher; @@ -151,6 +159,7 @@ impl SyncResolve for PutBuilder<'_, '_> { key_expr: key_expr?, congestion_control, priority, + is_express, destination, }; @@ -248,6 +257,7 @@ pub struct Publisher<'a> { pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, + pub(crate) is_express: bool, pub(crate) destination: Locality, } @@ -738,6 +748,7 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) key_expr: ZResult>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, + pub(crate) is_express: bool, pub(crate) destination: Locality, } @@ -751,6 +762,7 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { }, congestion_control: self.congestion_control, priority: self.priority, + is_express: self.is_express, destination: self.destination, } } @@ -771,6 +783,13 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } + /// Change the `congestion_control` to apply when routing the data. + #[inline] + pub fn express(mut self, is_express: bool) -> Self { + self.is_express = is_express; + self + } + /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
#[zenoh_macros::unstable] @@ -830,6 +849,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { key_expr, congestion_control: self.congestion_control, priority: self.priority, + is_express: self.is_express, destination: self.destination, }; log::trace!("publish({:?})", publisher.key_expr); @@ -867,7 +887,7 @@ fn resolve_put( ext_qos: ext::QoSType::new( publisher.priority.into(), publisher.congestion_control, - false, + publisher.is_express, ), ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -933,7 +953,7 @@ fn resolve_put( qos: QoS::from(ext::QoSType::new( publisher.priority.into(), publisher.congestion_control, - false, + publisher.is_express, )), }; diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 4c303ae974..5e706a0da8 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -337,6 +337,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { key_expr: key_expr.try_into().map_err(Into::into), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, + is_express: false, destination: Locality::default(), } } @@ -1909,6 +1910,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: key_expr.try_into().map_err(Into::into), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, + is_express: false, destination: Locality::default(), } } From 4d8ec6ca2d2f326c02af5aa71a92e68200dd2ba0 Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Thu, 14 Mar 2024 18:34:55 +0000 Subject: [PATCH 018/598] fix(828): ensuring valid JSON response from REST API (#831) * fix(828): ensuring valid JSON response from REST API Signed-off-by: gabrik * fix(828): improved JSON format conversion Signed-off-by: gabrik * chore: addressing comments Signed-off-by: gabrik * fix(828): added 'into_string' for StringOrBase64 Signed-off-by: gabrik * chore: address comments Signed-off-by: gabrik --------- Signed-off-by: gabrik --- plugins/zenoh-plugin-rest/src/lib.rs | 85 ++++++++++++++++++---------- 
zenoh/src/payload.rs | 8 +++ 2 files changed, 64 insertions(+), 29 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 1a99d7b5a4..39225b5d25 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -18,9 +18,10 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use async_std::prelude::FutureExt; -use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; +use base64::Engine; use futures::StreamExt; use http_types::Method; +use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::convert::TryFrom; use std::str::FromStr; @@ -28,6 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::payload::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; @@ -46,36 +48,57 @@ lazy_static::lazy_static! 
{ } const RAW_KEY: &str = "_raw"; -fn payload_to_json(payload: Payload) -> String { - payload - .deserialize::() - .unwrap_or_else(|_| format!(r#""{}""#, b64_std_engine.encode(payload.contiguous()))) +#[derive(Serialize, Deserialize)] +struct JSONSample { + key: String, + value: serde_json::Value, + encoding: String, + time: Option, } -fn sample_to_json(sample: Sample) -> String { - format!( - r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, - sample.key_expr.as_str(), - payload_to_json(sample.payload), - sample.encoding, - if let Some(ts) = sample.timestamp { - ts.to_string() - } else { - "None".to_string() +pub fn base64_encode(data: &[u8]) -> String { + use base64::engine::general_purpose; + general_purpose::STANDARD.encode(data) +} + +fn payload_to_json(payload: Payload, encoding: &Encoding) -> serde_json::Value { + match payload.is_empty() { + // If the value is empty return a JSON null + true => serde_json::Value::Null, + // if it is not check the encoding + false => { + match encoding { + // If it is a JSON try to deserialize as json, if it fails fallback to base64 + &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { + serde_json::from_slice::(&payload.contiguous()).unwrap_or( + serde_json::Value::String(StringOrBase64::from(payload).into_string()), + ) + } + // otherwise convert to JSON string + _ => serde_json::Value::String(StringOrBase64::from(payload).into_string()), + } } - ) + } } -fn result_to_json(sample: Result) -> String { +fn sample_to_json(sample: Sample) -> JSONSample { + JSONSample { + key: sample.key_expr.as_str().to_string(), + value: payload_to_json(sample.payload, &sample.encoding), + encoding: sample.encoding.to_string(), + time: sample.timestamp.map(|ts| ts.to_string()), + } +} + +fn result_to_json(sample: Result) -> JSONSample { match sample { Ok(sample) => sample_to_json(sample), - Err(err) => { - format!( - r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - 
payload_to_json(err.payload), - err.encoding, - ) - } + Err(err) => JSONSample { + key: "ERROR".into(), + value: payload_to_json(err.payload, &err.encoding), + encoding: err.encoding.to_string(), + time: None, + }, } } @@ -83,10 +106,10 @@ async fn to_json(results: flume::Receiver) -> String { let values = results .stream() .filter_map(move |reply| async move { Some(result_to_json(reply.sample)) }) - .collect::>() - .await - .join(",\n"); - format!("[\n{values}\n]\n") + .collect::>() + .await; + + serde_json::to_string(&values).unwrap_or("[]".into()) } async fn to_json_response(results: flume::Receiver) -> Response { @@ -321,8 +344,12 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result String { + match self { + StringOrBase64::String(s) | StringOrBase64::Base64(s) => s, + } + } +} + impl Deref for StringOrBase64 { type Target = String; From 622b230286ca37899f768b24b865e18669c2b0c1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 15 Mar 2024 10:12:14 +0100 Subject: [PATCH 019/598] Add express support (#829) * Improve docs * Add express to examples * Fix doc in sample.rs --- examples/examples/z_ping.rs | 9 +++++++-- examples/examples/z_pong.rs | 10 +++++++--- examples/examples/z_pub_thr.rs | 4 ++++ zenoh/src/publication.rs | 8 ++++++-- zenoh/src/sample.rs | 2 +- 5 files changed, 25 insertions(+), 8 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index cb6fecd81a..a57c937e48 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -22,7 +22,7 @@ fn main() { // initiate logging env_logger::init(); - let (config, warmup, size, n) = parse_args(); + let (config, warmup, size, n, express) = parse_args(); let session = zenoh::open(config).res().unwrap(); // The key expression to publish data on @@ -35,6 +35,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) + .express(express) .res() .unwrap(); @@ -78,6 +79,9 @@ fn main() { 
#[derive(Parser)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + no_express: bool, #[arg(short, long, default_value = "1")] /// The number of seconds to warm up (float) warmup: f64, @@ -90,12 +94,13 @@ struct Args { common: CommonArgs, } -fn parse_args() -> (Config, Duration, usize, usize) { +fn parse_args() -> (Config, Duration, usize, usize, bool) { let args = Args::parse(); ( args.common.into(), Duration::from_secs_f64(args.warmup), args.payload_size, args.samples, + !args.no_express, ) } diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index c2412b6d37..576ef232e5 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -21,7 +21,7 @@ fn main() { // initiate logging env_logger::init(); - let config = parse_args(); + let (config, express) = parse_args(); let session = zenoh::open(config).res().unwrap().into_arc(); @@ -34,6 +34,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) + .express(express) .res() .unwrap(); @@ -47,11 +48,14 @@ fn main() { #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + no_express: bool, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> Config { +fn parse_args() -> (Config, bool) { let args = Args::parse(); - args.common.into() + (args.common.into(), !args.no_express) } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index c042b2e7a2..4354ad2e68 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -41,6 +41,7 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) + .express(args.express) .res() .unwrap(); @@ -65,6 +66,9 @@ fn main() { #[derive(Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] 
+ express: bool, /// Priority for sending data #[arg(short, long)] priority: Option, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 75d4ddc2b7..1531cab606 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -102,7 +102,9 @@ impl PutBuilder<'_, '_> { self } - /// Change the `congestion_control` to apply when routing the data. + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. #[inline] pub fn express(mut self, is_express: bool) -> Self { self.publisher = self.publisher.express(is_express); @@ -783,7 +785,9 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } - /// Change the `congestion_control` to apply when routing the data. + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. #[inline] pub fn express(mut self, is_express: bool) -> Self { self.is_express = is_express; diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 9c68b460d9..36ebeeb129 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -556,7 +556,7 @@ impl QoS { self.inner.get_congestion_control() } - /// Gets express flag value. If true, the message is not batched during transmission, in order to reduce latency. + /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. 
pub fn express(&self) -> bool { self.inner.is_express() } From d73da7d70fef25d76bf94945792da0f0adffed0b Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Fri, 15 Mar 2024 11:22:00 +0100 Subject: [PATCH 020/598] clippy --- plugins/zenoh-plugin-rest/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e0df8f286b..e2718f6579 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -344,7 +344,6 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result Date: Mon, 18 Mar 2024 13:20:01 +0100 Subject: [PATCH 021/598] Rename IntoCallbackReceiver trait to IntoHandler trait (#816) --- zenoh-ext/src/querying_subscriber.rs | 36 ++++++++--------- zenoh/src/handlers.rs | 60 ++++++++++++++++------------ zenoh/src/liveliness.rs | 40 +++++++++---------- zenoh/src/prelude.rs | 4 +- zenoh/src/publication.rs | 22 +++++----- zenoh/src/query.rs | 20 +++++----- zenoh/src/queryable.rs | 22 +++++----- zenoh/src/scouting.rs | 22 +++++----- zenoh/src/subscriber.rs | 42 +++++++++---------- 9 files changed, 138 insertions(+), 130 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 480e490fdd..8cb5480e58 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -105,7 +105,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle handler: Handler, ) -> QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> where - Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: zenoh::prelude::IntoHandler<'static, Sample>, { let QueryingSubscriberBuilder { session, @@ -214,17 +214,17 @@ impl<'a, 'b, KeySpace, Handler> QueryingSubscriberBuilder<'a, 'b, KeySpace, Hand impl<'a, KeySpace, Handler> Resolvable for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample>, - 
Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample>, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl SyncResolve for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let session = self.session.clone(); @@ -272,8 +272,8 @@ where impl<'a, KeySpace, Handler> AsyncResolve for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -462,7 +462,7 @@ where handler: Handler, ) -> FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: zenoh::prelude::IntoHandler<'static, Sample>, { let FetchingSubscriberBuilder { session, @@ -536,11 +536,11 @@ impl< TryIntoSample, > Resolvable for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: IntoCallbackReceiverPair<'static, Sample>, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample>, + Handler::Handler: Send, TryIntoSample: ExtractSample, { - type To = ZResult>; + type To = ZResult>; } impl< @@ -551,8 +551,8 @@ impl< > SyncResolve for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { fn res_sync(self) -> ::To { @@ -569,8 +569,8 @@ impl< > AsyncResolve for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: 
Into, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { type Future = Ready; @@ -643,14 +643,14 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { ) -> ZResult where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample, Receiver = Receiver> + Send, + Handler: IntoHandler<'static, Sample, Handler = Receiver> + Send, TryIntoSample: ExtractSample + Send + Sync, { let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), })); - let (callback, receiver) = conf.handler.into_cb_receiver_pair(); + let (callback, receiver) = conf.handler.into_handler(); let sub_callback = { let state = state.clone(); diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index 69828a5d7f..e5ec3bb0dc 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -17,34 +17,36 @@ use crate::API_DATA_RECEPTION_CHANNEL_SIZE; /// An alias for `Arc`. pub type Dyn = std::sync::Arc; + /// An immutable callback function. pub type Callback<'a, T> = Dyn; -/// A type that can be converted into a [`Callback`]-receiver pair. +/// A type that can be converted into a [`Callback`]-handler pair. /// /// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, -/// while granting you access to the receiver through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. +/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. /// /// Any closure that accepts `T` can be converted into a pair of itself and `()`. 
-pub trait IntoCallbackReceiverPair<'a, T> { - type Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver); +pub trait IntoHandler<'a, T> { + type Handler; + + fn into_handler(self) -> (Callback<'a, T>, Self::Handler); } -impl<'a, T, F> IntoCallbackReceiverPair<'a, T> for F + +impl<'a, T, F> IntoHandler<'a, T> for F where F: Fn(T) + Send + Sync + 'a, { - type Receiver = (); - fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver) { + type Handler = (); + fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { (Dyn::from(self), ()) } } -impl IntoCallbackReceiverPair<'static, T> - for (flume::Sender, flume::Receiver) -{ - type Receiver = flume::Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { +impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { let (sender, receiver) = self; ( Dyn::new(move |t| { @@ -56,18 +58,24 @@ impl IntoCallbackReceiverPair<'static, T> ) } } + +/// The default handler in Zenoh is a FIFO queue. 
pub struct DefaultHandler; -impl IntoCallbackReceiverPair<'static, T> for DefaultHandler { - type Receiver = flume::Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { - flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_cb_receiver_pair() + +impl IntoHandler<'static, T> for DefaultHandler { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_handler() } } -impl IntoCallbackReceiverPair<'static, T> + +impl IntoHandler<'static, T> for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) { - type Receiver = std::sync::mpsc::Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { + type Handler = std::sync::mpsc::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { let (sender, receiver) = self; ( Dyn::new(move |t| { @@ -96,7 +104,7 @@ pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { /// - `callback` will never be called once `drop` has started. /// - `drop` will only be called **once**, and **after every** `callback` has ended. /// - The two previous guarantees imply that `call` and `drop` are never called concurrently. 
-pub struct CallbackPair +pub struct CallbackDrop where DropFn: FnMut() + Send + Sync + 'static, { @@ -104,7 +112,7 @@ where pub drop: DropFn, } -impl Drop for CallbackPair +impl Drop for CallbackDrop where DropFn: FnMut() + Send + Sync + 'static, { @@ -113,14 +121,14 @@ where } } -impl<'a, OnEvent, Event, DropFn> IntoCallbackReceiverPair<'a, Event> - for CallbackPair +impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop where OnEvent: Fn(Event) + Send + Sync + 'a, DropFn: FnMut() + Send + Sync + 'static, { - type Receiver = (); - fn into_cb_receiver_pair(self) -> (Callback<'a, Event>, Self::Receiver) { + type Handler = (); + + fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { (Dyn::from(move |evt| (self.callback)(evt)), ()) } } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index d4229db4cc..4103504f13 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -484,7 +484,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ```no_run @@ -507,7 +507,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> LivelinessSubscriberBuilder<'a, 'b, Handler> where - Handler: crate::handlers::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::handlers::IntoHandler<'static, Sample>, { let LivelinessSubscriberBuilder { session, @@ -525,23 +525,23 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] impl<'a, Handler> SyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { #[zenoh_macros::unstable] fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -564,8 +564,8 @@ where #[zenoh_macros::unstable] impl<'a, Handler> AsyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -677,7 +677,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
+ /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ``` @@ -700,7 +700,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> LivelinessGetBuilder<'a, 'b, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply>, + Handler: IntoHandler<'static, Reply>, { let LivelinessGetBuilder { session, @@ -728,19 +728,19 @@ impl<'a, 'b, Handler> LivelinessGetBuilder<'a, 'b, Handler> { impl Resolvable for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - type To = ZResult; + type To = ZResult; } impl SyncResolve for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); self.session .query( @@ -761,8 +761,8 @@ where impl AsyncResolve for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 177906e9b1..26c93e1801 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -37,11 +37,11 @@ pub(crate) mod common { pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; pub use crate::config::{self, Config, ValidatedMap}; - pub use crate::handlers::IntoCallbackReceiverPair; + pub use crate::handlers::IntoHandler; + pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; pub use 
crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1531cab606..392c0bf8c1 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -22,7 +22,7 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - handlers::{Callback, DefaultHandler, IntoCallbackReceiverPair}, + handlers::{Callback, DefaultHandler, IntoHandler}, Id, }; use std::future::Ready; @@ -1180,7 +1180,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { self.callback(crate::handlers::locked(callback)) } - /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -1208,7 +1208,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, MatchingStatus>, + Handler: crate::prelude::IntoHandler<'static, MatchingStatus>, { let MatchingListenerBuilder { publisher, @@ -1221,21 +1221,21 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: 
IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { #[zenoh_macros::unstable] fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); self.publisher .session .declare_matches_listener_inner(&self.publisher, callback) @@ -1253,8 +1253,8 @@ where #[zenoh_macros::unstable] impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index a848913c7a..fe48748ad4 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -209,7 +209,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ``` @@ -231,7 +231,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> GetBuilder<'a, 'b, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply>, + Handler: IntoHandler<'static, Reply>, { let GetBuilder { session, @@ -362,19 +362,19 @@ impl Default for ReplyKeyExpr { impl Resolvable for GetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - type To = ZResult; + type To = ZResult; } impl SyncResolve for GetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); self.session .query( @@ -395,8 +395,8 @@ where impl AsyncResolve for GetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d98df046b7..d2eabcdc2a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -599,7 +599,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ```no_run @@ -621,7 +621,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Query>, + Handler: crate::prelude::IntoHandler<'static, Query>, { let QueryableBuilder { session, @@ -657,7 +657,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { } } -/// A queryable that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A queryable that provides data through a [`Handler`](crate::prelude::IntoHandler). /// /// Queryables can be created from a zenoh [`Session`] /// with the [`declare_queryable`](crate::Session::declare_queryable) function @@ -740,20 +740,20 @@ impl Deref for Queryable<'_, Receiver> { impl<'a, Handler> Resolvable for QueryableBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Query> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Query> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for QueryableBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Query> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Query> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_queryable_inner( &self.key_expr?.to_wire(&session), @@ -774,8 +774,8 @@ where impl<'a, Handler> AsyncResolve for QueryableBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Query> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Query> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index ea09823ea1..470e2f1c61 100644 --- 
a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -115,7 +115,7 @@ impl ScoutBuilder { self.callback(locked(callback)) } - /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -136,7 +136,7 @@ impl ScoutBuilder { #[inline] pub fn with(self, handler: Handler) -> ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello>, + Handler: crate::prelude::IntoHandler<'static, Hello>, { let ScoutBuilder { what, @@ -153,27 +153,27 @@ impl ScoutBuilder { impl Resolvable for ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, - Handler::Receiver: Send, + Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl SyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, - Handler::Receiver: Send, + Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } impl AsyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, - Handler::Receiver: Send, + Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -231,7 +231,7 @@ impl fmt::Debug for ScoutInner { } } -/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ```no_run diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index d4c3257472..413c9201f2 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,7 +13,7 @@ // //! Subscribing primitives. -use crate::handlers::{locked, Callback, DefaultHandler, IntoCallbackReceiverPair}; +use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::key_expr::KeyExpr; use crate::prelude::Locality; use crate::sample::Sample; @@ -392,7 +392,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -414,7 +414,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::prelude::IntoHandler<'static, Sample>, { let SubscriberBuilder { session, @@ -511,21 +511,21 @@ impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { // Push mode impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + 
let (callback, receiver) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -550,8 +550,8 @@ where impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -563,21 +563,21 @@ where // Pull mode impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PullMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -604,8 +604,8 @@ where impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -614,7 +614,7 @@ where } } -/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). 
/// /// Subscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function @@ -647,7 +647,7 @@ pub struct Subscriber<'a, Receiver> { pub receiver: Receiver, } -/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). /// /// PullSubscribers only provide data when explicitely pulled by the /// application with the [`pull`](PullSubscriber::pull) function. From 665c90f0b326b125658267b2e50c4a6b43b3a42a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 18 Mar 2024 18:10:32 +0100 Subject: [PATCH 022/598] Optimize zint encode/decode (#838) * Rebase on protocol_changes * Fix rebase conflict --- commons/zenoh-codec/src/core/zint.rs | 52 ++++++++++++++++------------ commons/zenoh-codec/tests/codec.rs | 21 ++++++++++- 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 1c2f5a28e4..0daff7348b 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -17,7 +17,7 @@ use zenoh_buffers::{ writer::{DidntWrite, Writer}, }; -const VLE_LEN: usize = 10; +const VLE_LEN: usize = 9; impl LCodec for Zenoh080 { fn w_len(self, x: u64) -> usize { @@ -29,7 +29,6 @@ impl LCodec for Zenoh080 { const B6: u64 = u64::MAX << (7 * 6); const B7: u64 = u64::MAX << (7 * 7); const B8: u64 = u64::MAX << (7 * 8); - const B9: u64 = u64::MAX << (7 * 9); if (x & B1) == 0 { 1 @@ -47,10 +46,8 @@ impl LCodec for Zenoh080 { 7 } else if (x & B8) == 0 { 8 - } else if (x & B9) == 0 { - 9 } else { - 10 + 9 } } } @@ -112,15 +109,31 @@ where fn write(self, writer: &mut W, mut x: u64) -> Self::Output { writer.with_slot(VLE_LEN, move |buffer| { let mut len = 0; - let mut b = x as u8; - while x > 0x7f { - buffer[len] = b | 0x80; + while 
(x & !0x7f_u64) != 0 { + // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is + // the maximum number of bytes a VLE can take once encoded. + // I.e.: x is shifted 7 bits to the right every iteration, + // the loop is at most VLE_LEN iterations. + unsafe { + *buffer.get_unchecked_mut(len) = (x as u8) | 0x80_u8; + } len += 1; x >>= 7; - b = x as u8; } - buffer[len] = b; - len + 1 + // In case len == VLE_LEN then all the bits have already been written in the latest iteration. + // Else we haven't written all the necessary bytes yet. + if len != VLE_LEN { + // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is + // the maximum number of bytes a VLE can take once encoded. + // I.e.: x is shifted 7 bits to the right every iteration, + // the loop is at most VLE_LEN iterations. + unsafe { + *buffer.get_unchecked_mut(len) = x as u8; + } + len += 1; + } + // The number of written bytes + len })?; Ok(()) } @@ -137,19 +150,14 @@ where let mut v = 0; let mut i = 0; - let mut k = VLE_LEN; - while b > 0x7f && k > 0 { - v |= ((b & 0x7f) as u64) << i; - i += 7; + // 7 * VLE_LEN is beyond the maximum number of shift bits + while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN - 1) { + v |= ((b & 0x7f_u8) as u64) << i; b = reader.read_u8()?; - k -= 1; - } - if k > 0 { - v |= ((b & 0x7f) as u64) << i; - Ok(v) - } else { - Err(DidntRead) + i += 7; } + v |= (b as u64) << i; + Ok(v) } } diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 7f23214b49..3bca8b7489 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -121,10 +121,28 @@ macro_rules! 
run { // Core #[test] fn codec_zint() { + run!(u8, { u8::MIN }); + run!(u8, { u8::MAX }); run!(u8, { thread_rng().gen::() }); + + run!(u16, { u16::MIN }); + run!(u16, { u16::MAX }); run!(u16, { thread_rng().gen::() }); + + run!(u32, { u32::MIN }); + run!(u32, { u32::MAX }); run!(u32, { thread_rng().gen::() }); + + run!(u64, { u64::MIN }); + run!(u64, { u64::MAX }); + let codec = Zenoh080::new(); + for i in 1..=codec.w_len(u64::MAX) { + run!(u64, { 1 << (7 * i) }); + } run!(u64, { thread_rng().gen::() }); + + run!(usize, { usize::MIN }); + run!(usize, { usize::MAX }); run!(usize, thread_rng().gen::()); } @@ -138,11 +156,12 @@ fn codec_zint_len() { codec.write(&mut writer, n).unwrap(); assert_eq!(codec.w_len(n), buff.len()); - for i in 1..=9 { + for i in 1..=codec.w_len(u64::MAX) { let mut buff = vec![]; let mut writer = buff.writer(); let n: u64 = 1 << (7 * i); codec.write(&mut writer, n).unwrap(); + println!("ZInt len: {} {:02x?}", n, buff); assert_eq!(codec.w_len(n), buff.len()); } From 7300f4c8fe1c1fd89f1109d5091a642c3c51c298 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 19 Mar 2024 14:35:17 +0100 Subject: [PATCH 023/598] Remove pull API and protocol support (#821) * Remove Pull subscriber * Fix doctest. Remove unused code. 
* Remove routing code for pull subscriptions * Remove pull mode from DeclareSubscriber * Remove unsupported Put/Del in Request/Response (#839) * Address review comments --------- Co-authored-by: OlivierHecart --- Cargo.lock | 1 + commons/zenoh-codec/src/zenoh/mod.rs | 9 - commons/zenoh-codec/src/zenoh/pull.rs | 93 ----- commons/zenoh-codec/tests/codec.rs | 5 - commons/zenoh-collections/src/ring_buffer.rs | 9 + commons/zenoh-protocol/src/network/declare.rs | 48 +-- commons/zenoh-protocol/src/zenoh/mod.rs | 27 +- commons/zenoh-protocol/src/zenoh/pull.rs | 56 --- examples/Cargo.toml | 1 + examples/examples/z_pull.rs | 72 ++-- io/zenoh-transport/src/shm.rs | 8 +- zenoh-ext/src/subscriber_ext.rs | 6 +- zenoh/src/liveliness.rs | 1 - zenoh/src/net/routing/dispatcher/face.rs | 6 - zenoh/src/net/routing/dispatcher/pubsub.rs | 263 +++----------- zenoh/src/net/routing/dispatcher/queries.rs | 16 - zenoh/src/net/routing/dispatcher/resource.rs | 15 - zenoh/src/net/routing/dispatcher/tables.rs | 2 - zenoh/src/net/routing/hat/client/mod.rs | 15 +- zenoh/src/net/routing/hat/client/pubsub.rs | 34 +- zenoh/src/net/routing/hat/client/queries.rs | 3 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 15 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 35 +- .../net/routing/hat/linkstate_peer/queries.rs | 1 - zenoh/src/net/routing/hat/p2p_peer/mod.rs | 15 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 34 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 3 +- zenoh/src/net/routing/hat/router/mod.rs | 15 +- zenoh/src/net/routing/hat/router/pubsub.rs | 37 +- zenoh/src/net/routing/hat/router/queries.rs | 1 - zenoh/src/net/runtime/adminspace.rs | 96 ++--- zenoh/src/net/tests/tables.rs | 5 - zenoh/src/session.rs | 43 +-- zenoh/src/subscriber.rs | 331 +----------------- 34 files changed, 242 insertions(+), 1079 deletions(-) delete mode 100644 commons/zenoh-codec/src/zenoh/pull.rs delete mode 100644 commons/zenoh-protocol/src/zenoh/pull.rs diff --git a/Cargo.lock b/Cargo.lock index 
53f2600071..fa55ca4acd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4534,6 +4534,7 @@ dependencies = [ "rand 0.8.5", "rustc_version 0.4.0", "zenoh", + "zenoh-collections", "zenoh-ext", ] diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index 0d7146dc90..dc38e5ee84 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -13,7 +13,6 @@ // pub mod del; pub mod err; -pub mod pull; pub mod put; pub mod query; pub mod reply; @@ -81,9 +80,6 @@ where fn write(self, writer: &mut W, x: &RequestBody) -> Self::Output { match x { RequestBody::Query(b) => self.write(&mut *writer, b), - RequestBody::Put(b) => self.write(&mut *writer, b), - RequestBody::Del(b) => self.write(&mut *writer, b), - RequestBody::Pull(b) => self.write(&mut *writer, b), } } } @@ -100,9 +96,6 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::QUERY => RequestBody::Query(codec.read(&mut *reader)?), - id::PUT => RequestBody::Put(codec.read(&mut *reader)?), - id::DEL => RequestBody::Del(codec.read(&mut *reader)?), - id::PULL => RequestBody::Pull(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -121,7 +114,6 @@ where match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), ResponseBody::Err(b) => self.write(&mut *writer, b), - ResponseBody::Put(b) => self.write(&mut *writer, b), } } } @@ -139,7 +131,6 @@ where let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), - id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; diff --git a/commons/zenoh-codec/src/zenoh/pull.rs b/commons/zenoh-codec/src/zenoh/pull.rs deleted file mode 100644 index dc71901d58..0000000000 --- a/commons/zenoh-codec/src/zenoh/pull.rs +++ /dev/null @@ -1,93 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying 
materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; - -use zenoh_protocol::{ - common::imsg, - zenoh::{ - id, - pull::{flag, Pull}, - }, -}; - -impl WCodec<&Pull, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Pull) -> Self::Output { - let Pull { ext_unknown } = x; - - // Header - let mut header = id::PULL; - let mut n_exts = ext_unknown.len() as u8; - if n_exts != 0 { - header |= flag::Z; - } - self.write(&mut *writer, header)?; - - // Extensions - for u in ext_unknown.iter() { - n_exts -= 1; - self.write(&mut *writer, (u, n_exts != 0))?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != id::PULL { - return Err(DidntRead); - } - - // Extensions - let mut ext_unknown = Vec::new(); - - let mut has_ext = imsg::has_flag(self.header, flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let (u, ext) = extension::read(reader, "Pull", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - - Ok(Pull { ext_unknown }) - } -} diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 
3bca8b7489..2f0e870c4f 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -600,8 +600,3 @@ fn codec_reply() { fn codec_err() { run!(zenoh::Err, zenoh::Err::rand()); } - -#[test] -fn codec_pull() { - run!(zenoh::Pull, zenoh::Pull::rand()); -} diff --git a/commons/zenoh-collections/src/ring_buffer.rs b/commons/zenoh-collections/src/ring_buffer.rs index fd60030ebc..e9f7909d5f 100644 --- a/commons/zenoh-collections/src/ring_buffer.rs +++ b/commons/zenoh-collections/src/ring_buffer.rs @@ -40,6 +40,15 @@ impl RingBuffer { Some(elem) } + #[inline] + pub fn push_force(&mut self, elem: T) -> Option { + self.push(elem).and_then(|elem| { + let ret = self.buffer.pop_front(); + self.buffer.push_back(elem); + ret + }) + } + #[inline] pub fn pull(&mut self) -> Option { let x = self.buffer.pop_front(); diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 2dd8de4ef8..187fa87662 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -146,31 +146,6 @@ impl Declare { } } -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -#[repr(u8)] -pub enum Mode { - #[default] - Push, - Pull, -} - -impl Mode { - pub const DEFAULT: Self = Self::Push; - - #[cfg(feature = "test")] - fn rand() -> Self { - use rand::Rng; - - let mut rng = rand::thread_rng(); - - if rng.gen_bool(0.5) { - Mode::Push - } else { - Mode::Pull - } - } -} - pub mod common { use super::*; @@ -320,9 +295,7 @@ pub mod subscriber { /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// - /// - if R==1 then the subscription is reliable, else it is best effort - /// - if P==1 then the subscription is pull, else it is push - /// + /// - if R==1 then the subscription is reliable, else it is best effort /// /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareSubscriber { @@ -343,34 +316,29 @@ pub mod subscriber { /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// 
+-+-+-+---------+ - /// % reserved |P|R% + /// % reserved |R% /// +---------------+ /// /// - if R==1 then the subscription is reliable, else it is best effort - /// - if P==1 then the subscription is pull, else it is push /// - rsv: Reserved /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct SubscriberInfo { pub reliability: Reliability, - pub mode: Mode, } impl SubscriberInfo { pub const R: u64 = 1; - pub const P: u64 = 1 << 1; pub const DEFAULT: Self = Self { reliability: Reliability::DEFAULT, - mode: Mode::DEFAULT, }; #[cfg(feature = "test")] pub fn rand() -> Self { let reliability = Reliability::rand(); - let mode = Mode::rand(); - Self { reliability, mode } + Self { reliability } } } @@ -387,12 +355,7 @@ pub mod subscriber { } else { Reliability::BestEffort }; - let mode = if imsg::has_option(ext.value, SubscriberInfo::P) { - Mode::Pull - } else { - Mode::Push - }; - Self { reliability, mode } + Self { reliability } } } @@ -402,9 +365,6 @@ pub mod subscriber { if ext.reliability == Reliability::Reliable { v |= SubscriberInfo::R; } - if ext.mode == Mode::Pull { - v |= SubscriberInfo::P; - } Info::new(v) } } diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 3e5d573c43..7bca48f3ba 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -13,7 +13,6 @@ // pub mod del; pub mod err; -pub mod pull; pub mod put; pub mod query; pub mod reply; @@ -21,7 +20,6 @@ pub mod reply; use crate::core::Encoding; pub use del::Del; pub use err::Err; -pub use pull::Pull; pub use put::Put; pub use query::{Consolidation, Query}; pub use reply::Reply; @@ -33,7 +31,6 @@ pub mod id { pub const QUERY: u8 = 0x03; pub const REPLY: u8 = 0x04; pub const ERR: u8 = 0x05; - pub const PULL: u8 = 0x06; } // DataInfo @@ -80,9 +77,6 @@ impl From for PushBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestBody { Query(Query), - Put(Put), - Del(Del), - Pull(Pull), } impl RequestBody 
{ @@ -92,11 +86,8 @@ impl RequestBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..4) { + match rng.gen_range(0..1) { 0 => RequestBody::Query(Query::rand()), - 1 => RequestBody::Put(Put::rand()), - 2 => RequestBody::Del(Del::rand()), - 3 => RequestBody::Pull(Pull::rand()), _ => unreachable!(), } } @@ -108,24 +99,11 @@ impl From for RequestBody { } } -impl From for RequestBody { - fn from(p: Put) -> RequestBody { - RequestBody::Put(p) - } -} - -impl From for RequestBody { - fn from(d: Del) -> RequestBody { - RequestBody::Del(d) - } -} - // Response #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), Err(Err), - Put(Put), } impl ResponseBody { @@ -134,10 +112,9 @@ impl ResponseBody { use rand::Rng; let mut rng = rand::thread_rng(); - match rng.gen_range(0..3) { + match rng.gen_range(0..2) { 0 => ResponseBody::Reply(Reply::rand()), 1 => ResponseBody::Err(Err::rand()), - 2 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } } diff --git a/commons/zenoh-protocol/src/zenoh/pull.rs b/commons/zenoh-protocol/src/zenoh/pull.rs deleted file mode 100644 index eb4f7eb55e..0000000000 --- a/commons/zenoh-protocol/src/zenoh/pull.rs +++ /dev/null @@ -1,56 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::common::ZExtUnknown; -use alloc::vec::Vec; - -/// # Pull message -/// -/// ```text -/// Flags: -/// - X: Reserved -/// - X: Reserved -/// - Z: Extension If Z==1 then at least one extension is present -/// -/// 7 6 5 4 3 2 1 0 -/// +-+-+-+-+-+-+-+-+ -/// |Z|X|X| PULL | -/// +-+-+-+---------+ -/// ~ [pull_exts] ~ if Z==1 -/// +---------------+ -/// ``` -pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Pull { - pub ext_unknown: Vec, -} - -impl Pull { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let mut ext_unknown = Vec::new(); - for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2(1, false)); - } - - Self { ext_unknown } - } -} diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 190894fb18..b827ed2e7f 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -50,6 +50,7 @@ git-version = { workspace = true } json5 = { workspace = true } log = { workspace = true } zenoh = { workspace = true } +zenoh-collections = { workspace = true } zenoh-ext = { workspace = true } [dev-dependencies] diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 910d7614cf..d2c9a5380b 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,9 +13,12 @@ // use async_std::task::sleep; use clap::Parser; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; +use zenoh::{config::Config, prelude::r#async::*}; +use zenoh_collections::RingBuffer; use zenoh_examples::CommonArgs; #[async_std::main] @@ -23,50 +26,67 @@ async fn main() { // initiate logging 
env_logger::init(); - let (config, key_expr) = parse_args(); + let (config, key_expr, cache, interval) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); - println!("Declaring Subscriber on '{key_expr}'..."); + println!("Creating a local queue keeping the last {cache} elements..."); + let arb = Arc::new(Mutex::new(RingBuffer::new(cache))); + let arb_c = arb.clone(); - let subscriber = session + println!("Declaring Subscriber on '{key_expr}'..."); + let _subscriber = session .declare_subscriber(&key_expr) - .pull_mode() - .callback(|sample| { - let payload = sample - .payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); - println!( - ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind(), - sample.key_expr().as_str(), - payload, - ); + .callback(move |sample| { + arb_c.lock().unwrap().push_force(sample); }) .res() .await .unwrap(); - println!("Press CTRL-C to quit..."); - for idx in 0..u32::MAX { - sleep(Duration::from_secs(1)).await; - println!("[{idx:4}] Pulling..."); - subscriber.pull().res().await.unwrap(); + println!("Pulling data every {:#?} seconds", interval); + loop { + let mut res = arb.lock().unwrap().pull(); + print!(">> [Subscriber] Pulling "); + match res.take() { + Some(sample) => { + let payload = sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!( + "{} ('{}': '{}')", + sample.kind(), + sample.key_expr().as_str(), + payload, + ); + } + None => { + println!("nothing... sleep for {:#?}", interval); + sleep(interval).await; + } + } } } -#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(clap::Parser, Clone, PartialEq, Debug)] struct SubArgs { #[arg(short, long, default_value = "demo/example/**")] /// The Key Expression to subscribe to. key: KeyExpr<'static>, + /// The size of the cache. + #[arg(long, default_value = "3")] + cache: usize, + /// The interval for pulling the cache. 
+ #[arg(long, default_value = "5.0")] + interval: f32, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> (Config, KeyExpr<'static>) { +fn parse_args() -> (Config, KeyExpr<'static>, usize, Duration) { let args = SubArgs::parse(); - (args.common.into(), args.key) + let interval = Duration::from_secs_f32(args.interval); + (args.common.into(), args.key, args.cache, interval) } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 31910f51ae..0dd6662286 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -140,12 +140,9 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { }, NetworkBody::Request(Request { payload, .. }) => match payload { RequestBody::Query(b) => b.map_to_shminfo(), - RequestBody::Put(b) => b.map_to_shminfo(), - RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), }, NetworkBody::Response(Response { payload, .. }) => match payload { ResponseBody::Reply(b) => b.map_to_shminfo(), - ResponseBody::Put(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), @@ -194,13 +191,10 @@ pub fn map_zmsg_to_shmbuf( }, NetworkBody::Request(Request { payload, .. }) => match payload { RequestBody::Query(b) => b.map_to_shmbuf(shmr), - RequestBody::Put(b) => b.map_to_shmbuf(shmr), - RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), }, NetworkBody::Response(Response { payload, .. 
}) => match payload { - ResponseBody::Put(b) => b.map_to_shmbuf(shmr), - ResponseBody::Err(b) => b.map_to_shmbuf(shmr), ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + ResponseBody::Err(b) => b.map_to_shmbuf(shmr), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 89d3b5f691..6ac796efb1 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -21,7 +21,7 @@ use zenoh::{ liveliness::LivelinessSubscriberBuilder, prelude::Sample, query::{QueryConsolidation, QueryTarget}, - subscriber::{PushMode, Reliability, Subscriber, SubscriberBuilder}, + subscriber::{Reliability, Subscriber, SubscriberBuilder}, }; use crate::ExtractSample; @@ -122,9 +122,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler>; } -impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> - for SubscriberBuilder<'a, 'b, PushMode, Handler> -{ +impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilder<'a, 'b, Handler> { type KeySpace = crate::UserSpace; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber). 
diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 4103504f13..425aa62592 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -398,7 +398,6 @@ impl Drop for LivelinessToken<'_> { /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .pull_mode() /// .res() /// .await /// .unwrap(); diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 79c9da9127..cb565053c9 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -244,12 +244,6 @@ impl Primitives for Face { msg.ext_nodeid.node_id, ); } - RequestBody::Pull(_) => { - pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); - } - _ => { - log::error!("{} Unsupported request!", self); - } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index c0d1bb4a34..89c6c40206 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -12,17 +12,15 @@ // ZettaScale Zenoh Team, // use super::face::FaceState; -use super::resource::{DataRoutes, Direction, PullCaches, Resource}; +use super::resource::{DataRoutes, Direction, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; use crate::net::routing::hat::HatTrait; -use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; -use std::sync::RwLock; use zenoh_core::zread; -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::{Mode, SubscriberId}; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::{declare::ext, Push}, @@ -83,13 +81,10 @@ pub(crate) fn declare_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, 
data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -148,13 +143,10 @@ pub(crate) fn undeclare_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -223,7 +215,6 @@ pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc) { pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { update_data_routes(tables, res); - update_matching_pulls(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { update_data_routes_from(tables, child); @@ -233,22 +224,17 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc( tables: &'a Tables, res: &'a Arc, -) -> Vec<(Arc, DataRoutes, Arc)> { +) -> Vec<(Arc, DataRoutes)> { let mut routes = vec![]; if res.context.is_some() { let mut expr = RoutingExpr::new(res, ""); - routes.push(( - res.clone(), - compute_data_routes(tables, &mut expr), - compute_matching_pulls(tables, &mut expr), - )); + routes.push((res.clone(), compute_data_routes(tables, &mut expr))); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { let mut expr = RoutingExpr::new(&match_, ""); let match_routes = compute_data_routes(tables, &mut expr); - let matching_pulls = compute_matching_pulls(tables, &mut expr); - routes.push((match_, match_routes, matching_pulls)); + routes.push((match_, match_routes)); } } } @@ -258,12 +244,10 @@ pub(crate) fn 
compute_matches_data_routes<'a>( pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { if res.context.is_some() { update_data_routes(tables, res); - update_matching_pulls(tables, res); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { update_data_routes(tables, &mut match_); - update_matching_pulls(tables, &mut match_); } } } @@ -278,9 +262,6 @@ pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc Arc { - let mut pull_caches = PullCaches::default(); - compute_matching_pulls_(tables, &mut pull_caches, expr); - Arc::new(pull_caches) -} - -pub(crate) fn update_matching_pulls(tables: &Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - if res_mut.context_mut().matching_pulls.is_none() { - res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default())); - } - compute_matching_pulls_( - tables, - get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()), - &mut RoutingExpr::new(res, ""), - ); - } -} - -#[inline] -fn get_matching_pulls( - tables: &Tables, - res: &Option>, - expr: &mut RoutingExpr, -) -> Arc { - res.as_ref() - .and_then(|res| res.context.as_ref()) - .and_then(|ctx| ctx.matching_pulls.clone()) - .unwrap_or_else(|| compute_matching_pulls(tables, expr)) -} - -macro_rules! cache_data { - ( - $matching_pulls:expr, - $expr:expr, - $payload:expr - ) => { - for context in $matching_pulls.iter() { - get_mut_unchecked(&mut context.clone()) - .last_values - .insert($expr.full_expr().to_string(), $payload.clone()); - } - }; -} - #[cfg(feature = "stats")] macro_rules! 
inc_stats { ( @@ -497,12 +406,10 @@ pub fn full_reentrant_route_data( let route = get_data_route(&tables, face, &res, &mut expr, routing_context); - let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); - - if !(route.is_empty() && matching_pulls.is_empty()) { + if !route.is_empty() { treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp); - if route.len() == 1 && matching_pulls.len() == 0 { + if route.len() == 1 { let (outface, key_expr, context) = route.values().next().unwrap(); if tables .hat_code @@ -524,26 +431,43 @@ pub fn full_reentrant_route_data( payload, }) } - } else { - if !matching_pulls.is_empty() { - let lock = zlock!(tables.pull_caches_lock); - cache_data!(matching_pulls, expr, payload); - drop(lock); - } + } else if tables.whatami == WhatAmI::Router { + let route = route + .values() + .filter(|(outface, _key_expr, _context)| { + tables + .hat_code + .egress_filter(&tables, face, outface, &mut expr) + }) + .cloned() + .collect::>(); - if tables.whatami == WhatAmI::Router { - let route = route - .values() - .filter(|(outface, _key_expr, _context)| { - tables - .hat_code - .egress_filter(&tables, face, outface, &mut expr) - }) - .cloned() - .collect::>(); + drop(tables); + for (outface, key_expr, context) in route { + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, tx, user, payload) + } else { + inc_stats!(face, tx, admin, payload) + } - drop(tables); - for (outface, key_expr, context) in route { + outface.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: context }, + payload: payload.clone(), + }) + } + } else { + drop(tables); + for (outface, key_expr, context) in route.values() { + if face.id != outface.id + && match (face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { #[cfg(feature = "stats")] if !admin { inc_stats!(face, tx, user, payload) @@ -552,41 +476,13 @@ pub fn 
full_reentrant_route_data( } outface.primitives.send_push(Push { - wire_expr: key_expr, + wire_expr: key_expr.into(), ext_qos, ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: context }, + ext_nodeid: ext::NodeIdType { node_id: *context }, payload: payload.clone(), }) } - } else { - drop(tables); - for (outface, key_expr, context) in route.values() { - if face.id != outface.id - && match ( - face.mcast_group.as_ref(), - outface.mcast_group.as_ref(), - ) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - payload: payload.clone(), - }) - } - } } } } @@ -597,68 +493,3 @@ pub fn full_reentrant_route_data( } } } - -pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireExpr) { - let tables = zread!(tables_ref); - match tables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - let res = get_mut_unchecked(&mut res); - match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(_subinfo) => { - // let reliability = subinfo.reliability; - let lock = zlock!(tables.pull_caches_lock); - let route = get_mut_unchecked(ctx) - .last_values - .drain() - .map(|(name, sample)| { - ( - Resource::get_best_key(&tables.root_res, &name, face.id) - .to_owned(), - sample, - ) - }) - .collect::>(); - drop(lock); - drop(tables); - for (key_expr, payload) in route { - face.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos: ext::QoSType::PUSH, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - payload, - }); - } - } - None => { - log::error!( - "{} Pull data for unknown subscriber {} (no info)!", - face, - prefix.expr() + expr.suffix.as_ref() - ); - } 
- }, - None => { - log::error!( - "{} Pull data for unknown subscriber {} (no context)!", - face, - prefix.expr() + expr.suffix.as_ref() - ); - } - } - } - None => { - log::error!( - "{} Pull data for unknown subscriber {} (no resource)!", - face, - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!("{} Pull data with unknown scope {}!", face, expr.scope); - } - }; -} diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 721a98b8c2..04262e555d 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -460,20 +460,12 @@ macro_rules! inc_req_stats { if let Some(stats) = $face.stats.as_ref() { use zenoh_buffers::buffer::Buffer; match &$body { - RequestBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - RequestBody::Del(_) => { - stats.[<$txrx _z_del_msgs>].[](1); - } RequestBody::Query(q) => { stats.[<$txrx _z_query_msgs>].[](1); stats.[<$txrx _z_query_pl_bytes>].[]( q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), ); } - RequestBody::Pull(_) => (), } } } @@ -492,14 +484,6 @@ macro_rules! 
inc_res_stats { if let Some(stats) = $face.stats.as_ref() { use zenoh_buffers::buffer::Buffer; match &$body { - ResponseBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - let mut n = p.payload.len(); - if let Some(a) = p.ext_attachment.as_ref() { - n += a.buffer.len(); - } - stats.[<$txrx _z_put_pl_bytes>].[](n); - } ResponseBody::Reply(r) => { stats.[<$txrx _z_reply_msgs>].[](1); let mut n = 0; diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 9f43841025..3e35db14b6 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -24,7 +24,6 @@ use zenoh_config::WhatAmI; #[cfg(feature = "complete_n")] use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; -use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ @@ -51,7 +50,6 @@ pub(crate) struct QueryTargetQabl { pub(crate) distance: f64, } pub(crate) type QueryTargetQablSet = Vec; -pub(crate) type PullCaches = Vec>; pub(crate) struct SessionContext { pub(crate) face: Arc, @@ -59,7 +57,6 @@ pub(crate) struct SessionContext { pub(crate) remote_expr_id: Option, pub(crate) subs: Option, pub(crate) qabl: Option, - pub(crate) last_values: HashMap, pub(crate) in_interceptor_cache: Option>, pub(crate) e_interceptor_cache: Option>, } @@ -121,7 +118,6 @@ impl QueryRoutes { pub(crate) struct ResourceContext { pub(crate) matches: Vec>, - pub(crate) matching_pulls: Option>, pub(crate) hat: Box, pub(crate) valid_data_routes: bool, pub(crate) data_routes: DataRoutes, @@ -133,7 +129,6 @@ impl ResourceContext { fn new(hat: Box) -> ResourceContext { ResourceContext { matches: Vec::new(), - matching_pulls: None, hat, valid_data_routes: false, data_routes: DataRoutes::default(), @@ -159,14 +154,6 @@ impl ResourceContext { pub(crate) fn disable_query_routes(&mut self) { self.valid_query_routes = false; } - - pub(crate) fn 
update_matching_pulls(&mut self, pulls: Arc) { - self.matching_pulls = Some(pulls); - } - - pub(crate) fn disable_matching_pulls(&mut self) { - self.matching_pulls = None; - } } pub struct Resource { @@ -445,7 +432,6 @@ impl Resource { remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) @@ -708,7 +694,6 @@ pub fn register_expr( remote_expr_id: Some(expr_id), subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index e239a316a1..4f2fc2ee83 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -71,7 +71,6 @@ pub struct Tables { pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, pub(crate) interceptors: Vec, - pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, pub(crate) hat_code: Arc, // @TODO make this a Box } @@ -103,7 +102,6 @@ impl Tables { mcast_groups: vec![], mcast_faces: vec![], interceptors: interceptor_factories(config)?, - pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), }) diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 05210bcaee..a9908f5f58 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -20,9 +20,7 @@ use crate::{ net::routing::{ dispatcher::face::Face, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, runtime::Runtime, }; @@ -192,11 +190,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) 
{ let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -204,13 +198,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index f9f827ecc5..290f90f95f 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -94,16 +94,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -113,7 +108,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, - last_values: 
HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -132,10 +126,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, &propa_sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face); // This introduced a buffer overflow on windows // @TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -243,7 +235,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; for src_face in tables .faces @@ -327,20 +318,19 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { + if context.subs.is_some() + && match tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 4964a8880a..81e5ba52d9 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -22,7 +22,7 @@ use 
crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; @@ -133,7 +133,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 5591ea3b3e..3c4e2091f0 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -36,9 +36,7 @@ use crate::{ routing::{ dispatcher::face::Face, hat::TREES_COMPUTATION_DELAY_MS, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, runtime::Runtime, @@ -311,11 +309,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -323,13 +317,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 9a41915333..dddb6ae366 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -207,16 +207,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -226,7 +221,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -245,10 +239,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = 
Mode::Push; let zid = tables.zid; - register_peer_subscription(tables, face, res, &propa_sub_info, zid); + register_peer_subscription(tables, face, res, sub_info, zid); } #[inline] @@ -454,7 +446,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; if face.whatami == WhatAmI::Client { @@ -511,7 +502,6 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 51aac2175a..fa553e5121 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -285,7 +285,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 1a6c1ba407..59b39d4284 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -23,9 +23,7 @@ use crate::{ protocol::linkstate::LinkStateList, routing::{ dispatcher::face::Face, - router::{ - compute_data_routes, compute_matching_pulls, 
compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, runtime::Runtime, @@ -241,11 +239,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -253,13 +247,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 4f6ce5aeca..a722176292 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -94,16 +94,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + 
Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -113,7 +108,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -132,10 +126,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, &propa_sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face); // This introduced a buffer overflow on windows // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -243,7 +235,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; for src_face in tables .faces @@ -327,20 +318,19 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { + if context.subs.is_some() + && match tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git 
a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 04b31b41ef..caea6fe6b8 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -22,7 +22,7 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; @@ -133,7 +133,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index ff576ae271..47cf02db46 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -40,9 +40,7 @@ use crate::{ routing::{ dispatcher::face::Face, hat::TREES_COMPUTATION_DELAY_MS, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, runtime::Runtime, @@ -480,11 +478,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -492,13 +486,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index da1ca66efd..93c4cb7002 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -243,8 +243,7 @@ fn declare_peer_subscription( peer: ZenohId, ) { register_peer_subscription(tables, face, res, sub_info, peer); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + let propa_sub_info = *sub_info; let zid = tables.zid; register_router_subscription(tables, face, res, &propa_sub_info, zid); } @@ -260,16 +259,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -279,7 +273,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, 
- last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -298,10 +291,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_router_subscription(tables, face, res, &propa_sub_info, zid); + register_router_subscription(tables, face, res, sub_info, zid); } #[inline] @@ -600,7 +591,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; if face.whatami == WhatAmI::Client { @@ -720,7 +710,6 @@ pub(super) fn pubsub_tree_change( if *sub == tree_id { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; send_sourced_subscription_to_net_childs( tables, @@ -799,7 +788,6 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: let key_expr = Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -1003,14 +991,11 @@ impl HatPubSubTrait for HatCode { if master || source_type == WhatAmI::Router { for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if context.face.whatami != WhatAmI::Router && subinfo.mode == Mode::Push { - route.entry(*sid).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); - } + if 
context.subs.is_some() && context.face.whatami != WhatAmI::Router { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index b76f0adcc6..aca6f71b3e 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -413,7 +413,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index b67692e704..29106cb89d 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -388,58 +388,60 @@ impl Primitives for AdminSpace { fn send_request(&self, msg: Request) { trace!("recv Request {:?}", msg); - if let RequestBody::Query(query) = msg.payload { - let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); - { - let conf = self.context.runtime.state.config.lock(); - if !conf.adminspace.permissions().read { - log::error!( + match msg.payload { + RequestBody::Query(query) => { + let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); + { + let conf = self.context.runtime.state.config.lock(); + if !conf.adminspace.permissions().read { + log::error!( "Received GET on '{}' but adminspace.permissions.read=false in configuration", msg.wire_expr ); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }); - return; - } - } - - let key_expr = match self.key_expr_to_string(&msg.wire_expr) { - Ok(key_expr) => key_expr.into_owned(), - Err(e) => { - log::error!("Unknown KeyExpr: {}", e); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: 
ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }); - return; + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); + return; + } } - }; - - let zid = self.zid; - let parameters = query.parameters.to_owned(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr: key_expr.clone(), - parameters, - value: query - .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding)), - qid: msg.id, - zid, - primitives, - #[cfg(feature = "unstable")] - attachment: query.ext_attachment.map(Into::into), - }), - eid: self.queryable_id, - }; - for (key, handler) in &self.handlers { - if key_expr.intersects(key) { - handler(&self.context, query.clone()); + let key_expr = match self.key_expr_to_string(&msg.wire_expr) { + Ok(key_expr) => key_expr.into_owned(), + Err(e) => { + log::error!("Unknown KeyExpr: {}", e); + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); + return; + } + }; + + let zid = self.zid; + let parameters = query.parameters.to_owned(); + let query = Query { + inner: Arc::new(QueryInner { + key_expr: key_expr.clone(), + parameters, + value: query + .ext_body + .map(|b| Value::from(b.payload).with_encoding(b.encoding)), + qid: msg.id, + zid, + primitives, + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), + }), + eid: self.queryable_id, + }; + + for (key, handler) in &self.handlers { + if key_expr.intersects(key) { + handler(&self.context, query.clone()); + } } } } diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 4560eefaae..516bcd0109 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,6 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use 
zenoh_protocol::network::declare::Mode; use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use zenoh_protocol::zenoh::{PushBody, Put}; @@ -59,7 +58,6 @@ fn base_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( @@ -186,7 +184,6 @@ fn multisub_test() { // -------------- let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( zlock!(tables.ctrl_lock).as_ref(), @@ -305,7 +302,6 @@ fn clean_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( @@ -570,7 +566,6 @@ fn client_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; let primitives0 = Arc::new(ClientPrimitives::new()); diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 5e706a0da8..496c6879ce 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -81,7 +81,7 @@ use zenoh_protocol::{ }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, - Pull, PushBody, RequestBody, ResponseBody, + PushBody, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; @@ -294,7 +294,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -303,7 +303,6 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { session: self.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), reliability: Reliability::DEFAULT, - mode: PushMode, origin: Locality::default(), handler: DefaultHandler, } @@ -578,7 +577,7 @@ impl<'a> SessionDeclarations<'a, 'a> for Session { fn declare_subscriber<'b, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, 
PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -1556,29 +1555,6 @@ impl Session { } } - pub(crate) fn pull<'a>(&'a self, key_expr: &'a KeyExpr) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - trace!("pull({:?})", key_expr); - let state = zread!(self.state); - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_request(Request { - id: 0, // @TODO compute a proper request ID - wire_expr: key_expr.to_wire(self).to_owned(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - ext_target: request::ext::TargetType::DEFAULT, - ext_budget: None, - ext_timeout: None, - payload: RequestBody::Pull(Pull { - ext_unknown: vec![], - }), - }); - Ok(()) - }) - } - #[allow(clippy::too_many_arguments)] pub(crate) fn query( &self, @@ -1819,7 +1795,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'static, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'static, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -1828,7 +1804,6 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), reliability: Reliability::DEFAULT, - mode: PushMode, origin: Locality::default(), handler: DefaultHandler, } @@ -2110,20 +2085,12 @@ impl Primitives for Session { #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ), - RequestBody::Put(_) => (), - RequestBody::Del(_) => (), - RequestBody::Pull(_) => todo!(), } } fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Put(_) => { - log::warn!( - "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." 
- ) - } ResponseBody::Err(e) => { let mut state = zwrite!(self.state); match state.queries.get_mut(&msg.rid) { @@ -2453,7 +2420,7 @@ pub trait SessionDeclarations<'s, 'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 413c9201f2..4488140610 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -24,10 +24,10 @@ use std::fmt; use std::future::Ready; use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; +use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; /// The kind of reliability. pub use zenoh_protocol::core::Reliability; @@ -80,90 +80,6 @@ pub(crate) struct SubscriberInner<'a> { pub(crate) alive: bool, } -/// A [`PullMode`] subscriber that provides data through a callback. -/// -/// CallbackPullSubscribers only provide data when explicitely pulled by the -/// application with the [`pull`](CallbackPullSubscriber::pull) function. -/// CallbackPullSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, -/// the [`callback`](SubscriberBuilder::callback) function -/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. 
-/// -/// # Examples -/// ``` -/// # async_std::task::block_on(async { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// subscriber.pull(); -/// # }) -/// ``` -pub(crate) struct PullSubscriberInner<'a> { - inner: SubscriberInner<'a>, -} - -impl<'a> PullSubscriberInner<'a> { - /// Pull available data for a [`CallbackPullSubscriber`]. - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.pull(); - /// # }) - /// ``` - #[inline] - pub fn pull(&self) -> impl Resolve> + '_ { - self.inner.session.pull(&self.inner.state.key_expr) - } - - /// Close a [`CallbackPullSubscriber`](CallbackPullSubscriber). - /// - /// `CallbackPullSubscribers` are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the `CallbackPullSubscriber` asynchronously. 
- /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// # fn data_handler(_sample: Sample) { }; - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(data_handler) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # }) - /// ``` - #[inline] - pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self.inner, ()) - } -} - impl<'a> SubscriberInner<'a> { /// Close a [`CallbackSubscriber`](CallbackSubscriber). /// @@ -248,28 +164,6 @@ impl Drop for SubscriberInner<'_> { } } -/// The mode for pull subscribers. -#[non_exhaustive] -#[derive(Debug, Clone, Copy)] -pub struct PullMode; - -impl From for Mode { - fn from(_: PullMode) -> Self { - Mode::Pull - } -} - -/// The mode for push subscribers. -#[non_exhaustive] -#[derive(Debug, Clone, Copy)] -pub struct PushMode; - -impl From for Mode { - fn from(_: PushMode) -> Self { - Mode::Push - } -} - /// A builder for initializing a [`FlumeSubscriber`]. 
/// /// # Examples @@ -281,7 +175,6 @@ impl From for Mode { /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .pull_mode() /// .res() /// .await /// .unwrap(); @@ -289,7 +182,7 @@ impl From for Mode { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { +pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(feature = "unstable")] pub session: SessionRef<'a>, #[cfg(not(feature = "unstable"))] @@ -305,8 +198,6 @@ pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(feature = "unstable")] - pub mode: Mode, #[cfg(not(feature = "unstable"))] pub(crate) mode: Mode, @@ -321,7 +212,7 @@ pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { pub(crate) handler: Handler, } -impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { +impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// Receive the samples for this subscription with a callback. 
/// /// # Examples @@ -339,7 +230,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// # }) /// ``` #[inline] - pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Mode, Callback> + pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Callback> where Callback: Fn(Sample) + Send + Sync + 'static, { @@ -347,7 +238,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, + origin, handler: _, } = self; @@ -355,7 +246,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, + origin, handler: callback, } @@ -385,7 +276,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { pub fn callback_mut( self, callback: CallbackMut, - ) -> SubscriberBuilder<'a, 'b, Mode, impl Fn(Sample) + Send + Sync + 'static> + ) -> SubscriberBuilder<'a, 'b, impl Fn(Sample) + Send + Sync + 'static> where CallbackMut: FnMut(Sample) + Send + Sync + 'static, { @@ -412,7 +303,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// # }) /// ``` #[inline] - pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> + pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Handler> where Handler: crate::prelude::IntoHandler<'static, Sample>, { @@ -420,7 +311,6 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, origin, handler: _, } = self; @@ -428,13 +318,13 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, origin, handler, } } } -impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { + +impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { /// Change the subscription reliability. 
#[inline] pub fn reliability(mut self, reliability: Reliability) -> Self { @@ -464,52 +354,10 @@ impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { self.origin = origin; self } - - /// Change the subscription mode to Pull. - #[inline] - pub fn pull_mode(self) -> SubscriberBuilder<'a, 'b, PullMode, Handler> { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode: _, - origin, - handler, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode: PullMode, - origin, - handler, - } - } - - /// Change the subscription mode to Push. - #[inline] - pub fn push_mode(self) -> SubscriberBuilder<'a, 'b, PushMode, Handler> { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode: _, - origin, - handler, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode: PushMode, - origin, - handler, - } - } } // Push mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PushMode, Handler> +impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, @@ -517,7 +365,7 @@ where type To = ZResult>; } -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> +impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, @@ -534,7 +382,6 @@ where callback, &SubscriberInfo { reliability: self.reliability, - mode: self.mode.into(), }, ) .map(|sub_state| Subscriber { @@ -548,61 +395,7 @@ where } } -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> -where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -// Pull mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoHandler<'static, Sample> 
+ Send, - Handler::Handler: Send, -{ - type To = ZResult>; -} - -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, -{ - fn res_sync(self) -> ::To { - let key_expr = self.key_expr?; - let session = self.session; - let (callback, receiver) = self.handler.into_handler(); - session - .declare_subscriber_inner( - &key_expr, - &None, - self.origin, - callback, - &SubscriberInfo { - reliability: self.reliability, - mode: self.mode.into(), - }, - ) - .map(|sub_state| PullSubscriber { - subscriber: PullSubscriberInner { - inner: SubscriberInner { - session, - state: sub_state, - alive: true, - }, - }, - receiver, - }) - } -} - -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> +impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, @@ -647,102 +440,6 @@ pub struct Subscriber<'a, Receiver> { pub receiver: Receiver, } -/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). -/// -/// PullSubscribers only provide data when explicitely pulled by the -/// application with the [`pull`](PullSubscriber::pull) function. -/// PullSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, -/// the [`with`](SubscriberBuilder::with) function -/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. 
-/// -/// # Examples -/// ``` -/// # async_std::task::block_on(async { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .with(flume::bounded(32)) -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// subscriber.pull(); -/// # }) -/// ``` -#[non_exhaustive] -pub struct PullSubscriber<'a, Receiver> { - pub(crate) subscriber: PullSubscriberInner<'a>, - pub receiver: Receiver, -} - -impl<'a, Receiver> Deref for PullSubscriber<'a, Receiver> { - type Target = Receiver; - fn deref(&self) -> &Self::Target { - &self.receiver - } -} - -impl<'a, Receiver> DerefMut for PullSubscriber<'a, Receiver> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver - } -} - -impl<'a, Receiver> PullSubscriber<'a, Receiver> { - /// Pull available data for a [`PullSubscriber`]. - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .with(flume::bounded(32)) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.pull(); - /// # }) - /// ``` - #[inline] - pub fn pull(&self) -> impl Resolve> + '_ { - self.subscriber.pull() - } - - /// Close a [`PullSubscriber`]. - /// - /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the Subscriber asynchronously. 
- /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.declare_subscriber("key/expression") - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # }) - /// ``` - #[inline] - pub fn undeclare(self) -> impl Resolve> + 'a { - self.subscriber.undeclare() - } -} - impl<'a, Receiver> Subscriber<'a, Receiver> { /// Returns the [`EntityGlobalId`] of this Subscriber. /// From 5b18594a5084bc2f98fc14ee52078dffa2328eec Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 14:58:46 +0100 Subject: [PATCH 024/598] replaced sample new to put/delete --- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 250 ++++++++++-------- zenoh/src/sample.rs | 70 +++-- zenoh/src/session.rs | 6 +- 4 files changed, 192 insertions(+), 136 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index fb46b78082..0df648409d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -108,7 +108,7 @@ impl Aligner { let Value { payload, encoding, .. 
} = value; - let sample = Sample::new(key, payload) + let sample = Sample::put(key, payload) .with_encoding(encoding) .with_timestamp(ts); log::debug!("[ALIGNER] Adding {:?} to storage", sample); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 0708dcabd9..9e4ae7ad0e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,12 +19,12 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; +use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::time::{Timestamp, NTP64}; +use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; @@ -39,9 +39,102 @@ pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; #[derive(Clone)] -struct Update { - kind: SampleKind, - data: StoredData, +enum Update { + Put(StoredData), + Delete(Timestamp), +} + +impl From for Update { + fn from(sample: Sample) -> Self { + let mut sample = sample; + let timestamp = *sample.ensure_timestamp(); + match sample.kind() { + SampleKind::Put => Update::Put(StoredData { + value: Value::from(sample), + timestamp, + }), + SampleKind::Delete => Update::Delete(timestamp), + } + } +} + +impl Update { + fn timestamp(&self) -> &Timestamp { + match self { + Update::Put(data) => &data.timestamp, + Update::Delete(ts) => ts, + } + } +} + +// implement from String for Update +impl TryFrom for Update { + type Error = zenoh::Error; + + fn 
try_from(value: String) -> Result { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).with_encoding(result.2); + let timestamp = Timestamp::from_str(&result.1).map_err(|_|"Error parsing timestamp")?; + if result .0.eq(&(SampleKind::Put).to_string()) { + Ok(Update::Put(StoredData { value, timestamp })) + } else { + Ok(Update::Delete(timestamp)) + } + } +} + +// implement to_string for Update +impl ToString for Update { + fn to_string(&self) -> String { + let result = match self { + Update::Put(data) => ( + SampleKind::Put.to_string(), + data.timestamp.to_string(), + data.value.encoding.to_string(), + data.value.payload.slices().collect::>(), + ), + Update::Delete(ts) => ( + SampleKind::Delete.to_string(), + ts.to_string(), + "".to_string(), + vec![], + ), + }; + serde_json::to_string_pretty(&result).unwrap() + } +} + +trait IntoSample { + fn into_sample(self, key_expr: IntoKeyExpr) -> Sample + where + IntoKeyExpr: Into>; +} + +impl IntoSample for StoredData { + fn into_sample(self, key_expr: IntoKeyExpr) -> Sample + where + IntoKeyExpr: Into>, + { + Sample::put(key_expr, self.value.payload) + .with_encoding(self.value.encoding) + .with_timestamp(self.timestamp) + } +} + +impl IntoSample for Update { + fn into_sample(self, key_expr: IntoKeyExpr) -> Sample + where + IntoKeyExpr: Into>, + { + match self { + Update::Put(data) => data.into_sample(key_expr), + Update::Delete(ts) => Sample::delete(key_expr).with_timestamp(ts), + } + } } pub struct ReplicationService { @@ -109,10 +202,10 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); + serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap let mut wildcard_updates = 
storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, construct_update(data)); + wildcard_updates.insert(&k, Update::try_from(data).unwrap()); // TODO: Remove unwrap } } } @@ -269,6 +362,7 @@ impl StorageService { } else { sample }; + let sample_timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { @@ -297,28 +391,17 @@ impl StorageService { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - k + &k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = match self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) - .await - { - Some(overriding_update) => { - let Value { - payload, encoding, .. - } = overriding_update.data.value; - Sample::new(KeyExpr::from(k.clone()), payload) - .with_encoding(encoding) - .with_timestamp(overriding_update.data.timestamp) - .with_kind(overriding_update.kind) - } - None => Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) - .with_encoding(sample.encoding().clone()) - .with_timestamp(*sample.timestamp().unwrap()) - .with_kind(sample.kind()), - }; + let sample_to_store = + match self.ovderriding_wild_update(&k, &sample_timestamp).await { + Some(overriding_update) => overriding_update.into_sample(k.clone()), + + None => sample.clone(), + }; + let timestamp = sample_to_store.timestamp().unwrap_or(&sample_timestamp); let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, @@ -341,11 +424,8 @@ impl StorageService { } SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; - storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) - .await + 
self.mark_tombstone(&k, *timestamp).await; + storage.delete(stripped_key, *timestamp).await } }; drop(storage); @@ -393,22 +473,12 @@ impl StorageService { // @TODO: change into a better store that does incremental writes let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - let timestamp = *sample.timestamp().unwrap(); - wildcards.insert( - &key, - Update { - kind: sample.kind(), - data: StoredData { - value: Value::from(sample), - timestamp, - }, - }, - ); + wildcards.insert(&key, sample.into()); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, serialize_update(update)); + serialized_data.insert(k, update.to_string()); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -437,34 +507,36 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if weight.is_some() && weight.unwrap().data.timestamp > *ts { - // if the key matches a wild card update, check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; + if let Some(weight) = weight { + if weight.timestamp() > ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = 
self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; + } } } - } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = &weight.unwrap().data.timestamp; - update = Some(weight.unwrap().clone()); + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = weight.timestamp(); + update = Some(weight.clone()); + } } } } @@ -517,12 +589,7 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. - } = entry.value; - let sample = Sample::new(key.clone(), payload) - .with_encoding(encoding) - .with_timestamp(entry.timestamp); + let sample = entry.into_sample(key.clone()); // apply outgoing interceptor on results let sample = if let Some(ref interceptor) = self.out_interceptor { interceptor(sample) @@ -558,7 +625,7 @@ impl StorageService { let Value { payload, encoding, .. 
} = entry.value; - let sample = Sample::new(q.key_expr().clone(), payload) + let sample = Sample::put(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); // apply outgoing interceptor on results @@ -687,35 +754,6 @@ impl StorageService { } } -fn serialize_update(update: &Update) -> String { - let result = ( - update.kind.to_string(), - update.data.timestamp.to_string(), - update.data.value.encoding.to_string(), - update.data.value.payload.slices().collect::>(), - ); - serde_json::to_string_pretty(&result).unwrap() -} - -fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).with_encoding(result.2); - let data = StoredData { - value, - timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() - }; - let kind = if result.0.eq(&(SampleKind::Put).to_string()) { - SampleKind::Put - } else { - SampleKind::Delete - }; - Update { kind, data } -} - // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -747,7 +785,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.data.timestamp; + let ts = update.timestamp(); if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index e294fd2c0c..757d65afd8 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -378,9 +378,9 @@ pub struct Sample { } impl Sample { - /// Creates a new Sample. + /// Creates a "put" Sample. 
#[inline] - pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self + pub fn put(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, IntoPayload: Into, @@ -389,7 +389,7 @@ impl Sample { key_expr: key_expr.into(), payload: payload.into(), encoding: Encoding::default(), - kind: SampleKind::default(), + kind: SampleKind::Put, timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] @@ -398,29 +398,55 @@ impl Sample { attachment: None, } } - /// Creates a new Sample. + + /// Creates a "delete" Sample. #[inline] - pub fn try_from( - key_expr: TryIntoKeyExpr, - payload: IntoPayload, - ) -> Result + pub fn delete(key_expr: IntoKeyExpr) -> Self where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - IntoPayload: Into, + IntoKeyExpr: Into>, { - Ok(Sample { - key_expr: key_expr.try_into().map_err(Into::into)?, - payload: payload.into(), + Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), encoding: Encoding::default(), - kind: SampleKind::default(), + kind: SampleKind::Delete, timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), #[cfg(feature = "unstable")] attachment: None, - }) + } + } + + /// Attempts to create a "put" Sample + #[inline] + pub fn try_put( + key_expr: TryIntoKeyExpr, + payload: TryIntoPayload, + ) -> Result + where + TryIntoKeyExpr: TryInto>, + >>::Error: Into, + TryIntoPayload: TryInto, + >::Error: Into, + { + let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; + let payload: Payload = payload.try_into().map_err(Into::into)?; + Ok(Self::put(key_expr, payload)) + } + + /// Attempts to create a "delete" Sample + #[inline] + pub fn try_delete( + key_expr: TryIntoKeyExpr, + ) -> Result + where + TryIntoKeyExpr: TryInto>, + >>::Error: Into, + { + let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; + Ok(Self::delete(key_expr)) } /// Creates a new Sample with optional data info. 
@@ -444,9 +470,10 @@ impl Sample { self } - /// Sets the encoding of this Sample. + /// Sets the encoding of this Sample #[inline] pub fn with_encoding(mut self, encoding: Encoding) -> Self { + assert!(self.kind == SampleKind::Put, "Cannot set encoding on a delete sample"); self.encoding = encoding; self } @@ -469,15 +496,6 @@ impl Sample { self.kind } - /// Sets the kind of this Sample. - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn with_kind(mut self, kind: SampleKind) -> Self { - self.kind = kind; - self - } - /// Gets the encoding of this sample #[inline] pub fn encoding(&self) -> &Encoding { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 5e706a0da8..93d1e2fb9d 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1538,7 +1538,7 @@ impl Session { let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { #[allow(unused_mut)] - let mut sample = Sample::new(key_expr, payload.clone()).with_info(info.clone()); + let mut sample = Sample::put(key_expr, payload.clone()).with_info(info.clone()); #[cfg(feature = "unstable")] { sample.attachment = attachment.clone(); @@ -1547,7 +1547,7 @@ impl Session { } if let Some((cb, key_expr)) = last { #[allow(unused_mut)] - let mut sample = Sample::new(key_expr, payload).with_info(info); + let mut sample = Sample::put(key_expr, payload).with_info(info); #[cfg(feature = "unstable")] { sample.attachment = attachment; @@ -2257,7 +2257,7 @@ impl Primitives for Session { #[allow(unused_mut)] let mut sample = - Sample::new(key_expr.into_owned(), payload).with_info(Some(info)); + Sample::put(key_expr.into_owned(), payload).with_info(Some(info)); #[cfg(feature = "unstable")] { sample.attachment = attachment; From 1038beb92c438d477215813eaed9c173d9785f94 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 16:14:06 +0100 Subject: [PATCH 025/598] interceptors removed --- .../src/replica/storage.rs | 22 ------------------- 
1 file changed, 22 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9e4ae7ad0e..ed7c6a1d9c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -153,8 +153,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -178,8 +176,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor: store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -356,12 +352,6 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample - }; let sample_timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); // if wildcard, update wildcard_updates @@ -590,12 +580,6 @@ impl StorageService { Ok(stored_data) => { for entry in stored_data { let sample = entry.into_sample(key.clone()); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", @@ -628,12 +612,6 @@ impl StorageService { let sample = Sample::put(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply 
outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", From 09a84b34ad9be1fa39b4f0dd268d722d6a211f72 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 16:29:14 +0100 Subject: [PATCH 026/598] interceptors removed --- plugins/zenoh-backend-example/src/lib.rs | 13 ++---------- plugins/zenoh-backend-traits/src/lib.rs | 8 -------- .../src/backends_mgt.rs | 7 ------- .../zenoh-plugin-storage-manager/src/lib.rs | 4 ---- .../src/memory_backend/mod.rs | 20 ------------------- 5 files changed, 2 insertions(+), 50 deletions(-) diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..f81231a498 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,11 +13,8 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use std::collections::{hash_map::Entry, HashMap}; +use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult> { Ok(Box::::default()) } - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d17e6dfd77..91e030d361 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ 
b/plugins/zenoh-backend-traits/src/lib.rs @@ -210,14 +210,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..90a6ae6250 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,6 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -23,16 +22,12 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, 
config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..91df2f108d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,14 +239,10 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), - in_interceptor, - out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..4e333b8592 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { From 886c37c1922b7882fa3c670f5ad71b1662857729 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 17:13:02 +0100 Subject: [PATCH 027/598] storage sample added --- .../src/replica/storage.rs | 144 
+++++++++++------- 1 file changed, 91 insertions(+), 53 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index ed7c6a1d9c..41a456e344 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -27,7 +27,7 @@ use zenoh::query::ConsolidationMode; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -38,22 +38,52 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; +#[derive(Clone, Debug)] +enum StorageSampleKind { + Put(Value), + Delete, +} + +#[derive(Clone, Debug)] +struct StorageSample { + pub key_expr: KeyExpr<'static>, + pub timestamp: Timestamp, + pub kind: StorageSampleKind, +} + +impl From for StorageSample { + fn from(sample: Sample) -> Self { + let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); + // TODO: add API for disassembly of Sample + let key_expr = sample.key_expr().clone(); + let payload = sample.payload().clone(); + let encoding = sample.encoding().clone(); + let kind = match sample.kind() { + SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), + SampleKind::Delete => StorageSampleKind::Delete, + }; + StorageSample { + key_expr, + timestamp, + kind, + } + } +} + 
#[derive(Clone)] enum Update { Put(StoredData), Delete(Timestamp), } -impl From for Update { - fn from(sample: Sample) -> Self { - let mut sample = sample; - let timestamp = *sample.ensure_timestamp(); - match sample.kind() { - SampleKind::Put => Update::Put(StoredData { - value: Value::from(sample), - timestamp, +impl From for Update { + fn from(value: StorageSample) -> Self { + match value.kind { + StorageSampleKind::Put(data) => Update::Put(StoredData { + value: data, + timestamp: value.timestamp, }), - SampleKind::Delete => Update::Delete(timestamp), + StorageSampleKind::Delete => Update::Delete(value.timestamp), } } } @@ -78,8 +108,8 @@ impl TryFrom for Update { payload.push_zslice(slice.to_vec().into()); } let value = Value::new(payload).with_encoding(result.2); - let timestamp = Timestamp::from_str(&result.1).map_err(|_|"Error parsing timestamp")?; - if result .0.eq(&(SampleKind::Put).to_string()) { + let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; + if result.0.eq(&(SampleKind::Put).to_string()) { Ok(Update::Put(StoredData { value, timestamp })) } else { Ok(Update::Delete(timestamp)) @@ -90,7 +120,7 @@ impl TryFrom for Update { // implement to_string for Update impl ToString for Update { fn to_string(&self) -> String { - let result = match self { + let result = match self { Update::Put(data) => ( SampleKind::Put.to_string(), data.timestamp.to_string(), @@ -108,31 +138,41 @@ impl ToString for Update { } } -trait IntoSample { - fn into_sample(self, key_expr: IntoKeyExpr) -> Sample +trait IntoStorageSample { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample where IntoKeyExpr: Into>; } -impl IntoSample for StoredData { - fn into_sample(self, key_expr: IntoKeyExpr) -> Sample +impl IntoStorageSample for StoredData { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample where IntoKeyExpr: Into>, { - Sample::put(key_expr, self.value.payload) - .with_encoding(self.value.encoding) - 
.with_timestamp(self.timestamp) + StorageSample { + key_expr: key_expr.into(), + timestamp: self.timestamp, + kind: StorageSampleKind::Put(self.value), + } } } -impl IntoSample for Update { - fn into_sample(self, key_expr: IntoKeyExpr) -> Sample +impl IntoStorageSample for Update { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample where IntoKeyExpr: Into>, { match self { - Update::Put(data) => data.into_sample(key_expr), - Update::Delete(ts) => Sample::delete(key_expr).with_timestamp(ts), + Update::Put(data) => StorageSample { + key_expr: key_expr.into(), + timestamp: data.timestamp, + kind: StorageSampleKind::Put(data.value), + }, + Update::Delete(ts) => StorageSample { + key_expr: key_expr.into(), + timestamp: ts, + kind: StorageSampleKind::Delete, + }, } } } @@ -201,7 +241,8 @@ impl StorageService { serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, Update::try_from(data).unwrap()); // TODO: Remove unwrap + wildcard_updates.insert(&k, Update::try_from(data).unwrap()); + // TODO: Remove unwrap } } } @@ -272,7 +313,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. 
Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } }, // on query on key_expr @@ -350,33 +391,32 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: Sample) { + async fn process_sample(&self, sample: StorageSample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - let sample_timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); // if wildcard, update wildcard_updates - if sample.key_expr().is_wild() { + if sample.key_expr.is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr().is_wild() { - self.get_matching_keys(sample.key_expr()).await + let matching_keys = if sample.key_expr.is_wild() { + self.get_matching_keys(&sample.key_expr).await } else { - vec![sample.key_expr().clone().into()] + vec![sample.key_expr.clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr(), + sample.key_expr, matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) + .is_deleted(&k.clone(), &sample.timestamp) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, &sample.timestamp).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", @@ -386,14 +426,13 @@ impl StorageService { // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. 
// get the relevant wild card entry and use that value and timestamp to update the storage let sample_to_store = - match self.ovderriding_wild_update(&k, &sample_timestamp).await { + match self.ovderriding_wild_update(&k, &sample.timestamp).await { Some(overriding_update) => overriding_update.into_sample(k.clone()), - None => sample.clone(), + None => sample.into(), }; - let timestamp = sample_to_store.timestamp().unwrap_or(&sample_timestamp); - let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { + let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -401,22 +440,21 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample.kind() { - SampleKind::Put => { + let result = match sample_to_store.kind { + StorageSampleKind::Put(data) => { storage .put( stripped_key, - Value::new(sample_to_store.payload().clone()) - .with_encoding(sample_to_store.encoding().clone()), - *sample_to_store.timestamp().unwrap(), + data, + sample_to_store.timestamp, ) .await - } - SampleKind::Delete => { + }, + StorageSampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *timestamp).await; - storage.delete(stripped_key, *timestamp).await - } + self.mark_tombstone(&k, sample_to_store.timestamp).await; + storage.delete(stripped_key, sample_to_store.timestamp).await + }, }; drop(storage); if self.replication.is_some() @@ -428,7 +466,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), sample_to_store.timestamp)); match sending { Ok(_) => (), Err(e) => { @@ -459,9 +497,9 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: Sample) { + async fn register_wildcard_update(&self, sample: StorageSample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr().clone(); + let key = 
sample.key_expr.clone(); let mut wildcards = self.wildcard_updates.write().await; wildcards.insert(&key, sample.into()); if self.capability.persistence.eq(&Persistence::Durable) { @@ -719,7 +757,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", From 780c82a3cae1115e624141c929639648e6902e16 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 17:35:38 +0100 Subject: [PATCH 028/598] some compile error fixes --- .../src/replica/aligner.rs | 18 ++++++++++-------- .../src/replica/storage.rs | 10 +++++----- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 0df648409d..3f672382f1 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,6 +12,9 @@ // ZettaScale Zenoh Team, // +use crate::replica::storage::StorageSampleKind; + +use super::storage::StorageSample; use super::{Digest, EraType, LogEntry, Snapshotter}; use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; use async_std::sync::{Arc, RwLock}; @@ -29,7 +32,7 @@ pub struct Aligner { digest_key: OwnedKeyExpr, snapshotter: Arc, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, digests_processed: RwLock>, } @@ -38,7 +41,7 @@ impl Aligner { session: Arc, digest_key: OwnedKeyExpr, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, snapshotter: Arc, ) { let aligner = Aligner { @@ -105,12 +108,11 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let Value { - payload, encoding, .. 
- } = value; - let sample = Sample::put(key, payload) - .with_encoding(encoding) - .with_timestamp(ts); + let sample = StorageSample { + key_expr: key.into(), + timestamp: ts, + kind: StorageSampleKind::Put(value), + }; log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 41a456e344..307ca95680 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -39,13 +39,13 @@ pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; #[derive(Clone, Debug)] -enum StorageSampleKind { +pub enum StorageSampleKind { Put(Value), Delete, } #[derive(Clone, Debug)] -struct StorageSample { +pub struct StorageSample { pub key_expr: KeyExpr<'static>, pub timestamp: Timestamp, pub kind: StorageSampleKind, @@ -179,7 +179,7 @@ impl IntoStorageSample for Update { pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -361,7 +361,7 @@ impl StorageService { } }; sample.ensure_timestamp(); - self.process_sample(sample).await; + self.process_sample(sample.into()).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -429,7 +429,7 @@ impl StorageService { match self.ovderriding_wild_update(&k, &sample.timestamp).await { Some(overriding_update) => overriding_update.into_sample(k.clone()), - None => sample.into(), + None => sample.clone().into(), }; let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { From af0d167f6a1b0bcdc6d09074c4e2960f93034e90 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 17:54:31 
+0100 Subject: [PATCH 029/598] removed interceptor proxy --- plugins/zenoh-backend-traits/src/lib.rs | 50 +------------------------ 1 file changed, 1 insertion(+), 49 deletions(-) diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 91e030d361..16c00f64af 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -135,9 +135,7 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; +use zenoh::prelude::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -274,49 +272,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before to send the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, -} - -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query. - #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. - #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. 
- #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply_sample(sample) - } -} From 067823d3aa514735e60b684b0807fedfcfeb8069 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 20 Mar 2024 18:01:54 +0100 Subject: [PATCH 030/598] sample builders --- .../src/replica/storage.rs | 33 +- zenoh-ext/src/querying_subscriber.rs | 9 +- zenoh/src/sample.rs | 434 ++++++++++++------ zenoh/src/session.rs | 39 +- 4 files changed, 350 insertions(+), 165 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 307ca95680..5aa6b92a99 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,10 +24,13 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; +use zenoh::sample::SampleBuilder; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{ + Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, +}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -353,14 +356,20 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let mut sample = match 
sample { + let sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - sample.ensure_timestamp(); + let sample = if sample.timestamp().is_none() { + SampleBuilder::new(sample).with_current_timestamp().res_sync() + + + } else { + sample + }; self.process_sample(sample.into()).await; }, // on query on key_expr @@ -411,9 +420,7 @@ impl StorageService { ); for k in matching_keys { - if !self - .is_deleted(&k.clone(), &sample.timestamp) - .await + if !self.is_deleted(&k.clone(), &sample.timestamp).await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) && self.is_latest(&k, &sample.timestamp).await)) @@ -443,18 +450,16 @@ impl StorageService { let result = match sample_to_store.kind { StorageSampleKind::Put(data) => { storage - .put( - stripped_key, - data, - sample_to_store.timestamp, - ) + .put(stripped_key, data, sample_to_store.timestamp) .await - }, + } StorageSampleKind::Delete => { // register a tombstone self.mark_tombstone(&k, sample_to_store.timestamp).await; - storage.delete(stripped_key, sample_to_store.timestamp).await - }, + storage + .delete(stripped_key, sample_to_store.timestamp) + .await + } }; drop(storage); if self.replication.is_some() diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 8cb5480e58..7ca2730f57 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,6 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; +use zenoh::sample::SampleBuilder; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::Timestamp; use zenoh::Result as ZResult; @@ -655,7 +656,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { let sub_callback = { let state = state.clone(); let callback = callback.clone(); - move |mut s| { + move |s| 
{ let state = &mut zlock!(state); if state.pending_fetches == 0 { callback(s); @@ -663,7 +664,11 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { log::trace!("Sample received while fetch in progress: push it to merge_queue"); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - s.ensure_timestamp(); + let s = if s.timestamp().is_none() { + SampleBuilder::new(s).with_current_timestamp().res_sync() + } else { + s + }; state.merge_queue.push(s); } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 757d65afd8..395191a0d6 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -20,10 +20,8 @@ use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; -use std::{ - convert::{TryFrom, TryInto}, - fmt, -}; +use std::{convert::TryFrom, fmt}; +use zenoh_core::{zresult, AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; @@ -57,6 +55,83 @@ pub(crate) struct DataInfo { pub qos: QoS, } +pub(crate) trait DataInfoIntoSample { + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into; +} + +impl DataInfoIntoSample for DataInfo { + // TODO: this is internal function. + // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) + // The test for it is intentionally not added to avoid inserting extra "if" into hot path. + // This need to be additionally investigated and measured. 
+ #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: self.kind, + encoding: self.encoding.unwrap_or_default(), + timestamp: self.timestamp, + qos: self.qos, + #[cfg(feature = "unstable")] + source_info: SourceInfo { + source_id: self.source_id, + source_sn: self.source_sn, + }, + #[cfg(feature = "unstable")] + attachment, + } + } +} + +impl DataInfoIntoSample for Option { + #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + if let Some(data_info) = self { + data_info.into_sample(key_expr, payload, attachment) + } else { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment, + } + } + } +} + /// Informations on the source of a zenoh [`Sample`]. #[zenoh_macros::unstable] #[derive(Debug, Clone)] @@ -359,125 +434,275 @@ impl TryFrom for SampleKind { #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; -/// A zenoh sample. 
-#[non_exhaustive] -#[derive(Clone, Debug)] -pub struct Sample { - pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, - pub(crate) kind: SampleKind, - pub(crate) encoding: Encoding, - pub(crate) timestamp: Option, - pub(crate) qos: QoS, +pub struct SampleBuilder(Sample); - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, +impl SampleBuilder { + pub fn new(sample: Sample) -> Self { + Self(sample) + } - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + self.0.key_expr = key_expr.into(); + self + } + + // pub(crate) fn with_kind(mut self, kind: SampleKind) -> Self { + // self.0.kind = kind; + // self + // } + + pub(crate) fn with_encoding(mut self, encoding: Encoding) -> Self { + self.0.encoding = encoding; + self + } + + pub(crate) fn with_payload(mut self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + self.0.payload = payload.into(); + self + } + + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.0.timestamp = Some(timestamp); + self + } + + pub fn with_current_timestamp(mut self) -> Self { + self.0.timestamp = Some(new_reception_timestamp()); + self + } + + pub fn with_qos(mut self, qos: QoS) -> Self { + self.0.qos = qos; + self + } + + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.0.source_info = source_info; + self + } + + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.0.attachment = Some(attachment); + self + } } -impl Sample { - /// Creates a "put" Sample. 
- #[inline] - pub fn put(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self +pub struct PutSampleBuilder(SampleBuilder); + +impl PutSampleBuilder { + pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, IntoPayload: Into, { - Sample { + Self(SampleBuilder::new(Sample { key_expr: key_expr.into(), payload: payload.into(), - encoding: Encoding::default(), kind: SampleKind::Put, + encoding: Encoding::default(), timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), #[cfg(feature = "unstable")] attachment: None, - } + })) } - /// Creates a "delete" Sample. - #[inline] - pub fn delete(key_expr: IntoKeyExpr) -> Self + pub fn with_payload(mut self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + self.0 = self.0.with_payload(payload); + self + } + + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.0 = self.0.with_encoding(encoding); + self + } + + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.0 = self.0.with_timestamp(timestamp); + self + } + + pub fn with_current_timestamp(mut self) -> Self { + self.0 = self.0.with_current_timestamp(); + self + } + + pub fn with_qos(mut self, qos: QoS) -> Self { + self.0 = self.0.with_qos(qos); + self + } + + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.0 = self.0.with_source_info(source_info); + self + } + + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.0 = self.0.with_attachment(attachment); + self + } +} + +pub struct DeleteSampleBuilder(SampleBuilder); + +impl DeleteSampleBuilder { + pub fn new(key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { - Sample { + Self(SampleBuilder::new(Sample { key_expr: key_expr.into(), payload: Payload::empty(), - encoding: Encoding::default(), kind: SampleKind::Delete, + encoding: Encoding::default(), timestamp: None, qos: QoS::default(), 
#[cfg(feature = "unstable")] source_info: SourceInfo::empty(), #[cfg(feature = "unstable")] attachment: None, - } + })) } - - /// Attempts to create a "put" Sample - #[inline] - pub fn try_put( - key_expr: TryIntoKeyExpr, - payload: TryIntoPayload, - ) -> Result + pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - TryIntoPayload: TryInto, - >::Error: Into, + IntoKeyExpr: Into>, { - let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; - let payload: Payload = payload.try_into().map_err(Into::into)?; - Ok(Self::put(key_expr, payload)) + self.0 = self.0.with_keyexpr(key_expr); + self + } + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.0 = self.0.with_timestamp(timestamp); + self + } + pub fn with_current_timestamp(mut self) -> Self { + self.0 = self.0.with_current_timestamp(); + self } + pub fn with_qos(mut self, qos: QoS) -> Self { + self.0 = self.0.with_qos(qos); + self + } + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.0 = self.0.with_source_info(source_info); + self + } + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.0 = self.0.with_attachment(attachment); + self + } +} - /// Attempts to create a "delete" Sample - #[inline] - pub fn try_delete( - key_expr: TryIntoKeyExpr, - ) -> Result - where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - { - let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; - Ok(Self::delete(key_expr)) +impl From for SampleBuilder { + fn from(sample: Sample) -> Self { + SampleBuilder(sample) } +} - /// Creates a new Sample with optional data info. 
- #[inline] - pub(crate) fn with_info(mut self, mut data_info: Option) -> Self { - if let Some(mut data_info) = data_info.take() { - self.kind = data_info.kind; - if let Some(encoding) = data_info.encoding.take() { - self.encoding = encoding; - } - self.qos = data_info.qos; - self.timestamp = data_info.timestamp; - #[cfg(feature = "unstable")] - { - self.source_info = SourceInfo { - source_id: data_info.source_id, - source_sn: data_info.source_sn, - }; - } +impl TryFrom for PutSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Put { + bail!("Sample is not a put sample") } - self + Ok(Self(SampleBuilder(sample))) } +} - /// Sets the encoding of this Sample - #[inline] - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - assert!(self.kind == SampleKind::Put, "Cannot set encoding on a delete sample"); - self.encoding = encoding; - self +impl TryFrom for DeleteSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Delete { + bail!("Sample is not a delete sample") + } + Ok(Self(SampleBuilder(sample))) + } +} + +impl Resolvable for SampleBuilder { + type To = Sample; +} + +impl Resolvable for PutSampleBuilder { + type To = Sample; +} + +impl Resolvable for DeleteSampleBuilder { + type To = Sample; +} + +impl SyncResolve for SampleBuilder { + fn res_sync(self) -> Self::To { + self.0 } +} + +impl SyncResolve for PutSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} + +impl SyncResolve for DeleteSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} +impl AsyncResolve for SampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + futures::future::ready(self.0) + } +} + +impl AsyncResolve for PutSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} + +impl AsyncResolve for 
DeleteSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} + +/// A zenoh sample. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub struct Sample { + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) payload: Payload, + pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + pub(crate) qos: QoS, + + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl Sample { /// Gets the key expression on which this Sample was published. #[inline] pub fn key_expr(&self) -> &KeyExpr<'static> { @@ -508,15 +733,6 @@ impl Sample { self.timestamp.as_ref() } - /// Sets the timestamp of this Sample. - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.timestamp = Some(timestamp); - self - } - /// Gets the quality of service settings this Sample was sent with. #[inline] pub fn qos(&self) -> &QoS { @@ -530,52 +746,12 @@ impl Sample { &self.source_info } - /// Sets the source info of this Sample. - #[zenoh_macros::unstable] - #[inline] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = source_info; - self - } - - /// Ensure that an associated Timestamp is present in this Sample. - /// If not, a new one is created with the current system time and 0x00 as id. - /// Get the timestamp of this sample (either existing one or newly created) - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn ensure_timestamp(&mut self) -> &Timestamp { - if let Some(ref timestamp) = self.timestamp { - timestamp - } else { - let timestamp = new_reception_timestamp(); - self.timestamp = Some(timestamp); - self.timestamp.as_ref().unwrap() - } - } - /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
#[zenoh_macros::unstable] #[inline] pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } - - /// Gets the mutable sample attachment: a map of key-value pairs, where each key and value are byte-slices. - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn attachment_mut(&mut self) -> &mut Option { - &mut self.attachment - } - - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self - } } impl From for Value { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 93d1e2fb9d..0a63d82354 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -32,6 +32,7 @@ use crate::queryable::*; #[cfg(feature = "unstable")] use crate::sample::Attachment; use crate::sample::DataInfo; +use crate::sample::DataInfoIntoSample; use crate::sample::QoS; use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; @@ -1537,21 +1538,21 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { - #[allow(unused_mut)] - let mut sample = Sample::put(key_expr, payload.clone()).with_info(info.clone()); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment.clone(); - } + let sample = info.clone().into_sample( + key_expr, + payload.clone(), + #[cfg(feature = "unstable")] + attachment.clone(), + ); cb(sample); } if let Some((cb, key_expr)) = last { - #[allow(unused_mut)] - let mut sample = Sample::put(key_expr, payload).with_info(info); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment; - } + let sample = info.into_sample( + key_expr, + payload, + #[cfg(feature = "unstable")] + attachment.clone(), + ); cb(sample); } } @@ -2254,14 +2255,12 @@ impl Primitives for Session { attachment: _attachment.map(Into::into), }, }; - - #[allow(unused_mut)] - let mut sample = - Sample::put(key_expr.into_owned(), 
payload).with_info(Some(info)); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment; - } + let sample = info.into_sample( + key_expr.into_owned(), + payload, + #[cfg(feature = "unstable")] + attachment, + ); let new_reply = Reply { sample: Ok(sample), replier_id: ZenohId::rand(), // TODO From 4f1ba2f11fabc36a9c6900fee77107fd256fc14f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 20 Mar 2024 19:32:38 +0100 Subject: [PATCH 031/598] compiles --- .../src/replica/storage.rs | 37 ++++++++++++------- zenoh/src/sample.rs | 29 +++++++++++++++ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 5aa6b92a99..f90ea01754 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -28,9 +28,8 @@ use zenoh::sample::SampleBuilder; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{ - Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, -}; +use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh_core::SyncResolve; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -274,7 +273,12 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { + let storage_sub = match self + .session + .declare_subscriber(&self.key_expr) + .res_async() + .await + { Ok(storage_sub) => storage_sub, Err(e) => { log::error!("Error starting storage '{}': {}", self.name, e); @@ -287,7 +291,7 @@ 
impl StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res() + .res_async() .await { Ok(storage_queryable) => storage_queryable, @@ -365,8 +369,6 @@ impl StorageService { }; let sample = if sample.timestamp().is_none() { SampleBuilder::new(sample).with_current_timestamp().res_sync() - - } else { sample }; @@ -622,8 +624,12 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = entry.into_sample(key.clone()); - if let Err(e) = q.reply_sample(sample).res().await { + if let Err(e) = q + .reply(key.clone(), entry.value.payload) + .with_timestamp(entry.timestamp) + .res_async() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -652,10 +658,13 @@ impl StorageService { let Value { payload, encoding, .. } = entry.value; - let sample = Sample::put(q.key_expr().clone(), payload) + if let Err(e) = q + .reply(q.key_expr().clone(), payload) .with_encoding(encoding) - .with_timestamp(entry.timestamp); - if let Err(e) = q.reply_sample(sample).res().await { + .with_timestamp(entry.timestamp) + .res_async() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -668,7 +677,7 @@ impl StorageService { let err_message = format!("Storage '{}' raised an error on query: {}", self.name, e); log::warn!("{}", err_message); - if let Err(e) = q.reply_err(err_message).res().await { + if let Err(e) = q.reply_err(err_message).res_async().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -750,7 +759,7 @@ impl StorageService { .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) - .res() + .res_async() .await { Ok(replies) => replies, diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 395191a0d6..29d46cca3e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ 
-684,6 +684,19 @@ impl AsyncResolve for DeleteSampleBuilder { } } +pub struct SampleDecomposed { + pub key_expr: KeyExpr<'static>, + pub payload: Payload, + pub kind: SampleKind, + pub encoding: Encoding, + pub timestamp: Option, + pub qos: QoS, + #[cfg(feature = "unstable")] + pub source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub attachment: Option, +} + /// A zenoh sample. #[non_exhaustive] #[derive(Clone, Debug)] @@ -752,6 +765,22 @@ impl Sample { pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } + + /// Decomposes the Sample into its components + pub fn decompose(self) -> SampleDecomposed { + SampleDecomposed { + key_expr: self.key_expr, + payload: self.payload, + kind: self.kind, + encoding: self.encoding, + timestamp: self.timestamp, + qos: self.qos, + #[cfg(feature = "unstable")] + source_info: self.source_info, + #[cfg(feature = "unstable")] + attachment: self.attachment, + } + } } impl From for Value { From d7cb97a3705b82364a2b48557d025aa3bff156da Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 23 Mar 2024 00:41:17 +0100 Subject: [PATCH 032/598] SampleBuilderTrait --- .../src/replica/storage.rs | 4 +- zenoh-ext/src/querying_subscriber.rs | 8 +- zenoh/src/lib.rs | 1 + zenoh/src/sample.rs | 282 +--------------- zenoh/src/sample_builder.rs | 306 ++++++++++++++++++ 5 files changed, 315 insertions(+), 286 deletions(-) create mode 100644 zenoh/src/sample_builder.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f90ea01754..576f6adec2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,7 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample::SampleBuilder; +use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; use 
zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; @@ -368,7 +368,7 @@ impl StorageService { } }; let sample = if sample.timestamp().is_none() { - SampleBuilder::new(sample).with_current_timestamp().res_sync() + SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() } else { sample }; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 7ca2730f57..eb6d6e9516 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,9 +20,9 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::SampleBuilder; +use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; -use zenoh::time::Timestamp; +use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; use zenoh::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; @@ -665,7 +665,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. 
let s = if s.timestamp().is_none() { - SampleBuilder::new(s).with_current_timestamp().res_sync() + SampleBuilder::from(s) + .with_timestamp(new_reception_timestamp()) + .res_sync() } else { s }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index eb1ba1bcd1..8618cb9a88 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -146,6 +146,7 @@ pub mod publication; pub mod query; pub mod queryable; pub mod sample; +pub mod sample_builder; pub mod subscriber; pub mod value; #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 29d46cca3e..2c98d5ead1 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -16,12 +16,11 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; -use crate::time::{new_reception_timestamp, Timestamp}; +use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use zenoh_core::{zresult, AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; @@ -434,269 +433,6 @@ impl TryFrom for SampleKind { #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; -pub struct SampleBuilder(Sample); - -impl SampleBuilder { - pub fn new(sample: Sample) -> Self { - Self(sample) - } - - pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - self.0.key_expr = key_expr.into(); - self - } - - // pub(crate) fn with_kind(mut self, kind: SampleKind) -> Self { - // self.0.kind = kind; - // self - // } - - pub(crate) fn with_encoding(mut self, encoding: Encoding) -> Self { - self.0.encoding = encoding; - self - } - - pub(crate) fn with_payload(mut self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - self.0.payload = payload.into(); - self - } - - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self 
{ - self.0.timestamp = Some(timestamp); - self - } - - pub fn with_current_timestamp(mut self) -> Self { - self.0.timestamp = Some(new_reception_timestamp()); - self - } - - pub fn with_qos(mut self, qos: QoS) -> Self { - self.0.qos = qos; - self - } - - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.0.source_info = source_info; - self - } - - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.0.attachment = Some(attachment); - self - } -} - -pub struct PutSampleBuilder(SampleBuilder); - -impl PutSampleBuilder { - pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Self(SampleBuilder::new(Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - })) - } - - pub fn with_payload(mut self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - self.0 = self.0.with_payload(payload); - self - } - - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - self.0 = self.0.with_encoding(encoding); - self - } - - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.0 = self.0.with_timestamp(timestamp); - self - } - - pub fn with_current_timestamp(mut self) -> Self { - self.0 = self.0.with_current_timestamp(); - self - } - - pub fn with_qos(mut self, qos: QoS) -> Self { - self.0 = self.0.with_qos(qos); - self - } - - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.0 = self.0.with_source_info(source_info); - self - } - - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.0 = self.0.with_attachment(attachment); - self - } -} - -pub struct DeleteSampleBuilder(SampleBuilder); - -impl 
DeleteSampleBuilder { - pub fn new(key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(SampleBuilder::new(Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - })) - } - pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - self.0 = self.0.with_keyexpr(key_expr); - self - } - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.0 = self.0.with_timestamp(timestamp); - self - } - pub fn with_current_timestamp(mut self) -> Self { - self.0 = self.0.with_current_timestamp(); - self - } - pub fn with_qos(mut self, qos: QoS) -> Self { - self.0 = self.0.with_qos(qos); - self - } - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.0 = self.0.with_source_info(source_info); - self - } - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.0 = self.0.with_attachment(attachment); - self - } -} - -impl From for SampleBuilder { - fn from(sample: Sample) -> Self { - SampleBuilder(sample) - } -} - -impl TryFrom for PutSampleBuilder { - type Error = zresult::Error; - fn try_from(sample: Sample) -> Result { - if sample.kind != SampleKind::Put { - bail!("Sample is not a put sample") - } - Ok(Self(SampleBuilder(sample))) - } -} - -impl TryFrom for DeleteSampleBuilder { - type Error = zresult::Error; - fn try_from(sample: Sample) -> Result { - if sample.kind != SampleKind::Delete { - bail!("Sample is not a delete sample") - } - Ok(Self(SampleBuilder(sample))) - } -} - -impl Resolvable for SampleBuilder { - type To = Sample; -} - -impl Resolvable for PutSampleBuilder { - type To = Sample; -} - -impl Resolvable for DeleteSampleBuilder { - type To = Sample; -} - -impl 
SyncResolve for SampleBuilder { - fn res_sync(self) -> Self::To { - self.0 - } -} - -impl SyncResolve for PutSampleBuilder { - fn res_sync(self) -> Self::To { - self.0.res_sync() - } -} - -impl SyncResolve for DeleteSampleBuilder { - fn res_sync(self) -> Self::To { - self.0.res_sync() - } -} - -impl AsyncResolve for SampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - futures::future::ready(self.0) - } -} - -impl AsyncResolve for PutSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() - } -} - -impl AsyncResolve for DeleteSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() - } -} - -pub struct SampleDecomposed { - pub key_expr: KeyExpr<'static>, - pub payload: Payload, - pub kind: SampleKind, - pub encoding: Encoding, - pub timestamp: Option, - pub qos: QoS, - #[cfg(feature = "unstable")] - pub source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub attachment: Option, -} - /// A zenoh sample. 
#[non_exhaustive] #[derive(Clone, Debug)] @@ -765,22 +501,6 @@ impl Sample { pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } - - /// Decomposes the Sample into its components - pub fn decompose(self) -> SampleDecomposed { - SampleDecomposed { - key_expr: self.key_expr, - payload: self.payload, - kind: self.kind, - encoding: self.encoding, - timestamp: self.timestamp, - qos: self.qos, - #[cfg(feature = "unstable")] - source_info: self.source_info, - #[cfg(feature = "unstable")] - attachment: self.attachment, - } - } } impl From for Value { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs new file mode 100644 index 0000000000..a9cffb22d1 --- /dev/null +++ b/zenoh/src/sample_builder.rs @@ -0,0 +1,306 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use crate::sample::Attachment; +use crate::sample::QoS; +use crate::sample::SourceInfo; +use crate::Encoding; +use crate::KeyExpr; +use crate::Payload; +use crate::Priority; +use crate::Sample; +use crate::SampleKind; +use uhlc::Timestamp; +use zenoh_core::zresult; +use zenoh_core::AsyncResolve; +use zenoh_core::Resolvable; +use zenoh_core::SyncResolve; +use zenoh_protocol::core::CongestionControl; + +pub trait SampleBuilderTrait { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>; + fn with_timestamp(self, timestamp: Timestamp) -> Self; + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self; + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self; + fn congestion_control(self, congestion_control: CongestionControl) -> Self; + fn priority(self, priority: Priority) -> Self; + fn express(self, is_express: bool) -> Self; +} + +pub trait PutSampleBuilderTrait: SampleBuilderTrait { + fn with_encoding(self, encoding: Encoding) -> Self; + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into; +} + +pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} + +pub struct SampleBuilder(Sample); + +impl SampleBuilderTrait for SampleBuilder { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + let mut this = self; + this.0.key_expr = key_expr.into(); + this + } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + let mut this = self; + this.0.timestamp = Some(timestamp); + this + } + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self { + let mut this = self; + this.0.source_info = source_info; + this + } + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + let mut this = self; + this.0.attachment = Some(attachment); + this + 
} + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let mut this = self; + this.0.qos = this.0.qos.with_congestion_control(congestion_control); + this + } + fn priority(self, priority: Priority) -> Self { + let mut this = self; + this.0.qos = this.0.qos.with_priority(priority); + this + } + fn express(self, is_express: bool) -> Self { + let mut this = self; + this.0.qos = this.0.qos.with_express(is_express); + this + } +} + +pub struct PutSampleBuilder(SampleBuilder); + +impl PutSampleBuilder { + pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + Self(SampleBuilder::from(Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + })) + } + pub fn without_timestamp(self) -> Self { + let mut this = self; + this.0 .0.timestamp = None; + this + } + pub fn without_attachment(self) -> Self { + let mut this = self; + this.0 .0.attachment = None; + this + } +} + +impl SampleBuilderTrait for PutSampleBuilder { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self(self.0.with_timestamp(timestamp)) + } + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.with_source_info(source_info)) + } + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + Self(self.0.with_attachment(attachment)) + } + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self(self.0.congestion_control(congestion_control)) + } + fn priority(self, priority: Priority) -> Self { + Self(self.0.priority(priority)) + } + fn express(self, 
is_express: bool) -> Self { + Self(self.0.express(is_express)) + } +} + +impl PutSampleBuilderTrait for PutSampleBuilder { + fn with_encoding(self, encoding: Encoding) -> Self { + let mut this = self; + this.0 .0.encoding = encoding; + this + } + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + let mut this = self; + this.0 .0.payload = payload.into(); + this + } +} + +pub struct DeleteSampleBuilder(SampleBuilder); + +impl DeleteSampleBuilder { + pub fn new(key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(SampleBuilder::from(Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::Delete, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + })) + } +} + +impl SampleBuilderTrait for DeleteSampleBuilder { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self(self.0.with_timestamp(timestamp)) + } + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.with_source_info(source_info)) + } + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + Self(self.0.with_attachment(attachment)) + } + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self(self.0.congestion_control(congestion_control)) + } + fn priority(self, priority: Priority) -> Self { + Self(self.0.priority(priority)) + } + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) + } +} + +impl DeleteSampleBuilderTrait for DeleteSampleBuilder {} + +impl From for SampleBuilder { + fn from(sample: Sample) -> Self { + SampleBuilder(sample) + } +} + +impl TryFrom for PutSampleBuilder { + type Error = zresult::Error; 
+ fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Put { + bail!("Sample is not a put sample") + } + Ok(Self(SampleBuilder(sample))) + } +} + +impl TryFrom for DeleteSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Delete { + bail!("Sample is not a delete sample") + } + Ok(Self(SampleBuilder(sample))) + } +} + +impl Resolvable for SampleBuilder { + type To = Sample; +} + +impl Resolvable for PutSampleBuilder { + type To = Sample; +} + +impl Resolvable for DeleteSampleBuilder { + type To = Sample; +} + +impl SyncResolve for SampleBuilder { + fn res_sync(self) -> Self::To { + self.0 + } +} + +impl SyncResolve for PutSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} + +impl SyncResolve for DeleteSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} + +impl AsyncResolve for SampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + futures::future::ready(self.0) + } +} + +impl AsyncResolve for PutSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} + +impl AsyncResolve for DeleteSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} From a05b93de8c9507e597d2f85bce88c9787241590b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 23 Mar 2024 15:43:58 +0100 Subject: [PATCH 033/598] reply builder unfinished --- .../src/replica/storage.rs | 4 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/queryable.rs | 268 +++++++++++------- zenoh/src/sample_builder.rs | 99 ++++--- 4 files changed, 238 insertions(+), 135 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 576f6adec2..1aadc88611 100644 --- 
a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,7 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; @@ -368,7 +368,7 @@ impl StorageService { } }; let sample = if sample.timestamp().is_none() { - SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() + SampleUpdater::from(sample).with_timestamp(new_reception_timestamp()).res_sync() } else { sample }; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index eb6d6e9516..19388ea16f 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d2eabcdc2a..eb6ef013c7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -20,6 +20,10 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::QoS; use crate::sample::SourceInfo; +use crate::sample_builder::{ + DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, + SampleBuilder, SampleBuilderTrait, +}; use 
crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -102,43 +106,6 @@ impl Query { pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } - /// Sends a reply in the form of [`Sample`] to this Query. - /// - /// By default, queries only accept replies whose key expression intersects with the query's. - /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), - /// replying on a disjoint key expression will result in an error when resolving the reply. - /// This api is for internal use only. - #[inline(always)] - #[cfg(feature = "unstable")] - #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_> { - let Sample { - key_expr, - payload, - kind, - encoding, - timestamp, - qos, - #[cfg(feature = "unstable")] - source_info, - #[cfg(feature = "unstable")] - attachment, - } = sample; - ReplyBuilder { - query: self, - key_expr, - payload, - kind, - encoding, - timestamp, - qos, - #[cfg(feature = "unstable")] - source_info, - #[cfg(feature = "unstable")] - attachment, - } - } - /// Sends a reply to this Query. /// /// By default, queries only accept replies whose key expression intersects with the query's. @@ -154,18 +121,11 @@ impl Query { IntoKeyExpr: Into>, IntoPayload: Into, { + let sample_builder = PutSampleBuilder::new(key_expr, payload) + .with_qos(response::ext::QoSType::RESPONSE.into()); ReplyBuilder { query: self, - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - timestamp: None, - encoding: Encoding::default(), - qos: response::ext::QoSType::RESPONSE.into(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + sample_builder, } } /// Sends a error reply to this Query. 
@@ -187,22 +147,15 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyBuilder<'_> + pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyDelBuilder<'_> where IntoKeyExpr: Into>, { - ReplyBuilder { + let sample_builder = + DeleteSampleBuilder::new(key_expr).with_qos(response::ext::QoSType::RESPONSE.into()); + ReplyDelBuilder { query: self, - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - timestamp: None, - encoding: Encoding::default(), - qos: response::ext::QoSType::RESPONSE.into(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + sample_builder, } } @@ -250,45 +203,161 @@ impl fmt::Display for Query { #[derive(Debug)] pub struct ReplyBuilder<'a> { query: &'a Query, - key_expr: KeyExpr<'static>, - payload: Payload, - kind: SampleKind, - encoding: Encoding, - timestamp: Option, - qos: QoS, + sample_builder: PutSampleBuilder, +} + +impl SampleBuilderTrait for ReplyBuilder<'_> { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self { + sample_builder: self.sample_builder.with_keyexpr(key_expr), + ..self + } + } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp(timestamp), + ..self + } + } + #[cfg(feature = "unstable")] - source_info: SourceInfo, + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + sample_builder: self.sample_builder.with_source_info(source_info), + ..self + } + } + #[cfg(feature = "unstable")] - attachment: Option, + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment(attachment), + ..self + } + } + + fn 
congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + sample_builder: self.sample_builder.congestion_control(congestion_control), + ..self + } + } + + fn priority(self, priority: Priority) -> Self { + Self { + sample_builder: self.sample_builder.priority(priority), + ..self + } + } + + fn express(self, is_express: bool) -> Self { + Self { + sample_builder: self.sample_builder.express(is_express), + ..self + } + } } -/// A builder returned by [`Query::reply_err()`](Query::reply_err). +impl PutSampleBuilderTrait for ReplyBuilder<'_> { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { + sample_builder: self.sample_builder.with_encoding(encoding), + ..self + } + } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + sample_builder: self.sample_builder.with_payload(payload), + ..self + } + } +} + +/// A builder returned by [`Query::reply_del()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyErrBuilder<'a> { +pub struct ReplyDelBuilder<'a> { query: &'a Query, - value: Value, + sample_builder: DeleteSampleBuilder, } -impl<'a> ReplyBuilder<'a> { - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self +impl SampleBuilderTrait for ReplyDelBuilder<'_> { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self { + sample_builder: self.sample_builder.with_keyexpr(key_expr), + ..self + } } - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = source_info; - self + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp(timestamp), + ..self + } } - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self 
{ - self.timestamp = Some(timestamp); - self + + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + sample_builder: self.sample_builder.with_source_info(source_info), + ..self + } } - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; - self + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment(attachment), + ..self + } + } + + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + sample_builder: self.sample_builder.congestion_control(congestion_control), + ..self + } + } + + fn priority(self, priority: Priority) -> Self { + Self { + sample_builder: self.sample_builder.priority(priority), + ..self + } + } + + fn express(self, is_express: bool) -> Self { + Self { + sample_builder: self.sample_builder.express(is_express), + ..self + } + } +} + +impl DeleteSampleBuilderTrait for ReplyDelBuilder<'_> {} + +/// A builder returned by [`Query::reply_err()`](Query::reply_err). 
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, +} + +impl AsRef for ReplyBuilder<'_> { + fn as_ref(&self) -> &PutSampleBuilder { + &self.sample_builder } } @@ -298,19 +367,20 @@ impl<'a> Resolvable for ReplyBuilder<'a> { impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { + let sample = self.sample_builder.res_sync(); if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&self.key_expr) + && !self.query.key_expr().intersects(&sample.key_expr) { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", self.key_expr, self.query.key_expr()) + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) } #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled let mut ext_sinfo = None; #[cfg(feature = "unstable")] { - if self.source_info.source_id.is_some() || self.source_info.source_sn.is_some() { + if sample.source_info.source_id.is_some() || sample.source_info.source_sn.is_some() { ext_sinfo = Some(zenoh::put::ext::SourceInfoType { - id: self.source_info.source_id.unwrap_or_default(), - sn: self.source_info.source_sn.unwrap_or_default() as u32, + id: sample.source_info.source_id.unwrap_or_default(), + sn: sample.source_info.source_sn.unwrap_or_default() as u32, }) } } @@ -318,38 +388,38 @@ impl SyncResolve for ReplyBuilder<'_> { rid: self.query.inner.qid, wire_expr: WireExpr { scope: 0, - suffix: std::borrow::Cow::Owned(self.key_expr.into()), + suffix: std::borrow::Cow::Owned(sample.key_expr.into()), mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { consolidation: 
zenoh::Consolidation::DEFAULT, ext_unknown: vec![], - payload: match self.kind { + payload: match sample.kind { SampleKind::Put => ReplyBody::Put(Put { - timestamp: self.timestamp, - encoding: self.encoding.into(), + timestamp: sample.timestamp, + encoding: sample.encoding.into(), ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, #[cfg(feature = "unstable")] - ext_attachment: self.attachment.map(|a| a.into()), + ext_attachment: sample.attachment.map(|a| a.into()), #[cfg(not(feature = "unstable"))] ext_attachment: None, ext_unknown: vec![], - payload: self.payload.into(), + payload: sample.payload.into(), }), SampleKind::Delete => ReplyBody::Del(Del { - timestamp: self.timestamp, + timestamp: sample.timestamp, ext_sinfo, #[cfg(feature = "unstable")] - ext_attachment: self.attachment.map(|a| a.into()), + ext_attachment: sample.attachment.map(|a| a.into()), #[cfg(not(feature = "unstable"))] ext_attachment: None, ext_unknown: vec![], }), }, }), - ext_qos: self.qos.into(), + ext_qos: sample.qos.into(), ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index a9cffb22d1..fcf3a64182 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -51,52 +51,76 @@ pub trait PutSampleBuilderTrait: SampleBuilderTrait { pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} +#[derive(Debug)] pub struct SampleBuilder(Sample); +impl SampleBuilder { + pub(crate) fn without_timestamp(self) -> Self { + Self(Sample { + timestamp: None, + ..self.0 + }) + } + pub(crate) fn without_attachment(self) -> Self { + Self(Sample { + attachment: None, + ..self.0 + }) + } +} + impl SampleBuilderTrait for SampleBuilder { fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { - let mut this = self; - this.0.key_expr = key_expr.into(); - this + Self(Sample { + key_expr: key_expr.into(), + ..self.0 + }) } fn with_timestamp(self, 
timestamp: Timestamp) -> Self { - let mut this = self; - this.0.timestamp = Some(timestamp); - this + Self(Sample { + timestamp: Some(timestamp), + ..self.0 + }) } #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { - let mut this = self; - this.0.source_info = source_info; - this + Self(Sample { + source_info, + ..self.0 + }) } #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self { - let mut this = self; - this.0.attachment = Some(attachment); - this + Self(Sample { + attachment: Some(attachment), + ..self.0 + }) } fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let mut this = self; - this.0.qos = this.0.qos.with_congestion_control(congestion_control); - this + Self(Sample { + qos: self.0.qos.with_congestion_control(congestion_control), + ..self.0 + }) } fn priority(self, priority: Priority) -> Self { - let mut this = self; - this.0.qos = this.0.qos.with_priority(priority); - this + Self(Sample { + qos: self.0.qos.with_priority(priority), + ..self.0 + }) } fn express(self, is_express: bool) -> Self { - let mut this = self; - this.0.qos = this.0.qos.with_express(is_express); - this + Self(Sample { + qos: self.0.qos.with_express(is_express), + ..self.0 + }) } } +#[derive(Debug)] pub struct PutSampleBuilder(SampleBuilder); impl PutSampleBuilder { @@ -118,15 +142,17 @@ impl PutSampleBuilder { attachment: None, })) } + #[zenoh_macros::unstable] pub fn without_timestamp(self) -> Self { - let mut this = self; - this.0 .0.timestamp = None; - this + Self(self.0.without_timestamp()) } + #[zenoh_macros::unstable] pub fn without_attachment(self) -> Self { - let mut this = self; - this.0 .0.attachment = None; - this + Self(self.0.without_attachment()) + } + // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
+ pub(crate) fn with_qos(self, qos: QoS) -> Self { + Self(SampleBuilder(Sample { qos, ..self.0 .0 })) } } @@ -161,20 +187,23 @@ impl SampleBuilderTrait for PutSampleBuilder { impl PutSampleBuilderTrait for PutSampleBuilder { fn with_encoding(self, encoding: Encoding) -> Self { - let mut this = self; - this.0 .0.encoding = encoding; - this + Self(SampleBuilder(Sample { + encoding, + ..self.0 .0 + })) } fn with_payload(self, payload: IntoPayload) -> Self where IntoPayload: Into, { - let mut this = self; - this.0 .0.payload = payload.into(); - this + Self(SampleBuilder(Sample { + payload: payload.into(), + ..self.0 .0 + })) } } +#[derive(Debug)] pub struct DeleteSampleBuilder(SampleBuilder); impl DeleteSampleBuilder { @@ -195,6 +224,10 @@ impl DeleteSampleBuilder { attachment: None, })) } + // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. + pub(crate) fn with_qos(self, qos: QoS) -> Self { + Self(SampleBuilder(Sample { qos, ..self.0 .0 })) + } } impl SampleBuilderTrait for DeleteSampleBuilder { From 0992ff8812df04e4b0dc9acc01a45763739d0792 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 23 Mar 2024 22:44:36 +0100 Subject: [PATCH 034/598] replybuilder unfinished --- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 1 + zenoh/src/queryable.rs | 38 ++++++++++++++++++---------- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 19388ea16f..eb6d6e9516 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; +use zenoh::sample_builder::{SampleBuilder, 
SampleBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 29106cb89d..01f29ba19b 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,6 +20,7 @@ use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; +use crate::sample_builder::PutSampleBuilderTrait; use crate::value::Value; use async_std::task; use log::{error, trace}; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index eb6ef013c7..a5b6deca4c 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,11 +18,10 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::QoS; use crate::sample::SourceInfo; use crate::sample_builder::{ DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - SampleBuilder, SampleBuilderTrait, + SampleBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -355,23 +354,34 @@ pub struct ReplyErrBuilder<'a> { value: Value, } -impl AsRef for ReplyBuilder<'_> { - fn as_ref(&self) -> &PutSampleBuilder { - &self.sample_builder +impl<'a> Resolvable for ReplyBuilder<'a> { + type To = ZResult<()>; +} + +impl SyncResolve for ReplyBuilder<'_> { + fn res_sync(self) -> ::To { + let sample = self.sample_builder.res_sync(); + self.query._reply_sample(sample) } } -impl<'a> Resolvable for ReplyBuilder<'a> { +impl<'a> Resolvable for ReplyDelBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_> { +impl SyncResolve for ReplyDelBuilder<'_> { fn res_sync(self) -> ::To { let sample = self.sample_builder.res_sync(); - if !self.query._accepts_any_replies().unwrap_or(false) - && 
!self.query.key_expr().intersects(&sample.key_expr) + self.query._reply_sample(sample) + } +} + +impl Query { + fn _reply_sample(&self, sample: Sample) -> ZResult<()> { + if !self._accepts_any_replies().unwrap_or(false) + && !self.key_expr().intersects(&sample.key_expr) { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) } #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled let mut ext_sinfo = None; @@ -384,8 +394,8 @@ impl SyncResolve for ReplyBuilder<'_> { }) } } - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, + self.inner.primitives.send_response(Response { + rid: self.inner.qid, wire_expr: WireExpr { scope: 0, suffix: std::borrow::Cow::Owned(sample.key_expr.into()), @@ -422,8 +432,8 @@ impl SyncResolve for ReplyBuilder<'_> { ext_qos: sample.qos.into(), ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, + zid: self.inner.zid, + eid: self.eid, }), }); Ok(()) From 62378ad1805d3e13db06664f1176ca0f89393fe2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 11:21:32 +0100 Subject: [PATCH 035/598] new reply_sample --- zenoh/src/queryable.rs | 102 +++++++++++++++++++++++++++++++++++- zenoh/src/sample_builder.rs | 43 ++++++++++++++- 2 files changed, 141 insertions(+), 4 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a5b6deca4c..d0b80e9a11 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -21,7 +21,7 @@ use crate::prelude::*; use crate::sample::SourceInfo; use crate::sample_builder::{ DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, 
PutSampleBuilderTrait, - SampleBuilderTrait, + SampleBuilder, SampleBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -105,6 +105,24 @@ impl Query { pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } + + /// Sends a reply or delete reply to this Query + /// + /// This function is useful when resending the samples which can be of [`SampleKind::Put`] or [`SampleKind::Delete`] + /// It allows to build the reply with same common parameters, like timestamp, attachment, source_info, etc. + /// and only on final step to choose the kind of reply by calling [`ReplySampleBuilder::put`] or [`ReplySampleBuilder::delete`] methods. + #[inline(always)] + pub fn reply_sample(&self, key_expr: IntoKeyExpr) -> ReplySampleBuilder + where + IntoKeyExpr: Into>, + { + let sample_builder = SampleBuilder::new(key_expr); + ReplySampleBuilder { + query: self, + sample_builder, + } + } + /// Sends a reply to this Query. /// /// By default, queries only accept replies whose key expression intersects with the query's. @@ -197,7 +215,87 @@ impl fmt::Display for Query { } } -/// A builder returned by [`Query::reply()`](Query::reply) or [`Query::reply()`](Query::reply). 
+pub struct ReplySampleBuilder<'a> { + query: &'a Query, + sample_builder: SampleBuilder, +} + +impl<'a> ReplySampleBuilder<'a> { + pub fn put(self, payload: IntoPayload) -> ReplyBuilder<'a> + where + IntoPayload: Into, + { + let builder = ReplyBuilder { + query: self.query, + sample_builder: self.sample_builder.into(), + }; + builder.with_payload(payload) + } + pub fn delete(self) -> ReplyDelBuilder<'a> { + ReplyDelBuilder { + query: self.query, + sample_builder: self.sample_builder.into(), + } + } +} + +impl SampleBuilderTrait for ReplySampleBuilder<'_> { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self { + sample_builder: self.sample_builder.with_keyexpr(key_expr), + ..self + } + } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp(timestamp), + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + sample_builder: self.sample_builder.with_source_info(source_info), + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment(attachment), + ..self + } + } + + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + sample_builder: self.sample_builder.congestion_control(congestion_control), + ..self + } + } + + fn priority(self, priority: Priority) -> Self { + Self { + sample_builder: self.sample_builder.priority(priority), + ..self + } + } + + fn express(self, is_express: bool) -> Self { + Self { + sample_builder: self.sample_builder.express(is_express), + ..self + } + } +} + +/// A builder returned by [`Query::reply()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] pub struct ReplyBuilder<'a> { diff --git 
a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index fcf3a64182..61e4bf81fb 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -55,13 +55,30 @@ pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} pub struct SampleBuilder(Sample); impl SampleBuilder { - pub(crate) fn without_timestamp(self) -> Self { + pub fn new(key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::default(), + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }) + } + pub fn without_timestamp(self) -> Self { Self(Sample { timestamp: None, ..self.0 }) } - pub(crate) fn without_attachment(self) -> Self { + pub fn without_attachment(self) -> Self { Self(Sample { attachment: None, ..self.0 @@ -123,6 +140,17 @@ impl SampleBuilderTrait for SampleBuilder { #[derive(Debug)] pub struct PutSampleBuilder(SampleBuilder); +impl From for PutSampleBuilder { + fn from(sample_builder: SampleBuilder) -> Self { + Self(SampleBuilder { + 0: Sample { + kind: SampleKind::Put, + ..sample_builder.0 + }, + }) + } +} + impl PutSampleBuilder { pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where @@ -206,6 +234,17 @@ impl PutSampleBuilderTrait for PutSampleBuilder { #[derive(Debug)] pub struct DeleteSampleBuilder(SampleBuilder); +impl From for DeleteSampleBuilder { + fn from(sample_builder: SampleBuilder) -> Self { + Self(SampleBuilder { + 0: Sample { + kind: SampleKind::Delete, + ..sample_builder.0 + }, + }) + } +} + impl DeleteSampleBuilder { pub fn new(key_expr: IntoKeyExpr) -> Self where From cc580a5dd4a30409b12ab4ae7c5a81d0b9d5ab1d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 13:08:10 +0100 Subject: [PATCH 036/598] sample decompose, opt setters --- zenoh/src/queryable.rs | 45 
+++++++++++++++++++++++++++++ zenoh/src/sample.rs | 35 +++++++++++++++++++++++ zenoh/src/sample_builder.rs | 56 +++++++++++++++++++++---------------- 3 files changed, 112 insertions(+), 24 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d0b80e9a11..7d4a0903c2 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -250,6 +250,13 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { } } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp_opt(timestamp), + ..self + } + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), @@ -265,6 +272,14 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { } } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment_opt(attachment), + ..self + } + } + #[cfg(feature = "unstable")] fn with_attachment(self, attachment: Attachment) -> Self { Self { @@ -314,6 +329,13 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp_opt(timestamp), + ..self + } + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), @@ -329,6 +351,14 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment_opt(attachment), + ..self + } + } + #[cfg(feature = "unstable")] fn with_attachment(self, attachment: Attachment) -> Self { Self { @@ -397,6 +427,13 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { } } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { + sample_builder: 
self.sample_builder.with_timestamp_opt(timestamp), + ..self + } + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), @@ -412,6 +449,14 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { } } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment_opt(attachment), + ..self + } + } + #[cfg(feature = "unstable")] fn with_attachment(self, attachment: Attachment) -> Self { Self { diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 2c98d5ead1..3ac3087836 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -433,6 +433,41 @@ impl TryFrom for SampleKind { #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +/// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. +pub struct SampleFields { + pub key_expr: KeyExpr<'static>, + pub payload: Payload, + pub kind: SampleKind, + pub encoding: Encoding, + pub timestamp: Option, + pub express: bool, + pub priority: Priority, + pub congestion_control: CongestionControl, + #[cfg(feature = "unstable")] + pub source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub attachment: Option, +} + +impl From for SampleFields { + fn from(sample: Sample) -> Self { + SampleFields { + key_expr: sample.key_expr, + payload: sample.payload, + kind: sample.kind, + encoding: sample.encoding, + timestamp: sample.timestamp, + express: sample.qos.express(), + priority: sample.qos.priority(), + congestion_control: sample.qos.congestion_control(), + #[cfg(feature = "unstable")] + source_info: sample.source_info, + #[cfg(feature = "unstable")] + attachment: sample.attachment, + } + } +} + /// A zenoh sample. 
#[non_exhaustive] #[derive(Clone, Debug)] diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 61e4bf81fb..c0ebf8c9d0 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -32,10 +32,13 @@ pub trait SampleBuilderTrait { fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>; + fn with_timestamp_opt(self, timestamp: Option) -> Self; fn with_timestamp(self, timestamp: Timestamp) -> Self; #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self; #[zenoh_macros::unstable] + fn with_attachment_opt(self, attachment: Option) -> Self; + #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self; fn congestion_control(self, congestion_control: CongestionControl) -> Self; fn priority(self, priority: Priority) -> Self; @@ -72,18 +75,6 @@ impl SampleBuilder { attachment: None, }) } - pub fn without_timestamp(self) -> Self { - Self(Sample { - timestamp: None, - ..self.0 - }) - } - pub fn without_attachment(self) -> Self { - Self(Sample { - attachment: None, - ..self.0 - }) - } } impl SampleBuilderTrait for SampleBuilder { @@ -97,12 +88,17 @@ impl SampleBuilderTrait for SampleBuilder { }) } - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(Sample { - timestamp: Some(timestamp), + timestamp, ..self.0 }) } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + self.with_timestamp_opt(Some(timestamp)) + } + #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(Sample { @@ -110,13 +106,19 @@ impl SampleBuilderTrait for SampleBuilder { ..self.0 }) } + #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { + fn with_attachment_opt(self, attachment: Option) -> Self { Self(Sample { - attachment: Some(attachment), + attachment, ..self.0 }) } + + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) 
-> Self { + self.with_attachment_opt(Some(attachment)) + } fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(Sample { qos: self.0.qos.with_congestion_control(congestion_control), @@ -170,14 +172,6 @@ impl PutSampleBuilder { attachment: None, })) } - #[zenoh_macros::unstable] - pub fn without_timestamp(self) -> Self { - Self(self.0.without_timestamp()) - } - #[zenoh_macros::unstable] - pub fn without_attachment(self) -> Self { - Self(self.0.without_attachment()) - } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. pub(crate) fn with_qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) @@ -194,6 +188,9 @@ impl SampleBuilderTrait for PutSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self(self.0.with_timestamp_opt(timestamp)) + } #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -202,6 +199,10 @@ impl SampleBuilderTrait for PutSampleBuilder { fn with_attachment(self, attachment: Attachment) -> Self { Self(self.0.with_attachment(attachment)) } + #[zenoh_macros::unstable] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self(self.0.with_attachment_opt(attachment)) + } fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } @@ -279,6 +280,9 @@ impl SampleBuilderTrait for DeleteSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self(self.0.with_timestamp_opt(timestamp)) + } #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -287,6 
+291,10 @@ impl SampleBuilderTrait for DeleteSampleBuilder { fn with_attachment(self, attachment: Attachment) -> Self { Self(self.0.with_attachment(attachment)) } + #[zenoh_macros::unstable] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self(self.0.with_attachment_opt(attachment)) + } fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } From 270840247c72238654be20f611c4d4cb6338cfc4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 15:38:36 +0100 Subject: [PATCH 037/598] samples, plugins updated --- plugins/zenoh-plugin-example/src/lib.rs | 13 +++++++++- .../src/replica/align_queryable.rs | 2 ++ .../src/replica/storage.rs | 4 +-- zenoh-ext/src/publication_cache.rs | 25 ++++++++++++++----- zenoh/src/queryable.rs | 24 ++++++++++++------ 5 files changed, 51 insertions(+), 17 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 04f49b4739..9b9dda40de 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,6 +24,7 @@ use std::sync::{ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; @@ -174,7 +175,17 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - query.reply_sample(sample.clone()).res().await.unwrap(); + let reply = query + .reply_sample(sample.key_expr().clone().into_owned()) + .with_timestamp_opt(sample.timestamp().cloned()); + #[cfg(feature = "unstable")] + let reply = reply + 
.with_attachment_opt(sample.attachment()) + .with_source_info(sample.source_info()); + match sample.kind() { + SampleKind::Put => reply.put(sample.payload().clone()).res().await.unwrap(), + SampleKind::Delete => reply.delete().res().await.unwrap(), + } } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 32be4a5534..691fabd7a7 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,6 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::PutSampleBuilderTrait; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 1aadc88611..95af3c97a2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,7 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; +use zenoh::sample_builder::{PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; @@ -368,7 +368,7 @@ impl StorageService { } }; let sample = if sample.timestamp().is_none() { - SampleUpdater::from(sample).with_timestamp(new_reception_timestamp()).res_sync() + SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() } else { sample }; diff --git a/zenoh-ext/src/publication_cache.rs 
b/zenoh-ext/src/publication_cache.rs index 85cb96cce2..78fff32014 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,6 +20,7 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; @@ -116,6 +117,22 @@ pub struct PublicationCache<'a> { _stoptx: Sender, } +async fn reply_sample(query: &Query, sample: &Sample) { + let reply = query + .reply_sample(sample.key_expr().clone().into_owned()) + .with_timestamp_opt(sample.timestamp().cloned()); + #[cfg(feature = "unstable")] + let reply = reply + .with_attachment_opt(sample.attachment()) + .with_source_info(sample.source_info()); + if let Err(e) = match sample.kind() { + SampleKind::Put => reply.put(sample.payload().clone()).res_async().await, + SampleKind::Delete => reply.delete().res_async().await, + } { + log::warn!("Error replying to query: {}", e); + } +} + impl<'a> PublicationCache<'a> { fn new(conf: PublicationCacheBuilder<'a, '_, '_>) -> ZResult> { let key_expr = conf.pub_key_expr?; @@ -212,9 +229,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { - log::warn!("Error replying to query: {}", e); - } + reply_sample(&query, sample).await; } } } else { @@ -226,9 +241,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { - log::warn!("Error replying to query: {}", e); - } + reply_sample(&query, sample).await; } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 7d4a0903c2..f2e00e47c6 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -519,6 +519,22 @@ impl SyncResolve for ReplyDelBuilder<'_> { } } +impl<'a> AsyncResolve for ReplyBuilder<'a> { + type Future = Ready; 
+ + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl<'a> AsyncResolve for ReplyDelBuilder<'a> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + impl Query { fn _reply_sample(&self, sample: Sample) -> ZResult<()> { if !self._accepts_any_replies().unwrap_or(false) @@ -583,14 +599,6 @@ impl Query { } } -impl<'a> AsyncResolve for ReplyBuilder<'a> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - impl<'a> Resolvable for ReplyErrBuilder<'a> { type To = ZResult<()>; } From b80fd0aa30842e607ae661547368df7f818f3a29 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 18:58:55 +0100 Subject: [PATCH 038/598] interceptors removed from plugin storage API --- plugins/zenoh-backend-example/src/lib.rs | 13 +--- plugins/zenoh-backend-traits/src/lib.rs | 68 +------------------ .../src/backends_mgt.rs | 7 -- .../zenoh-plugin-storage-manager/src/lib.rs | 4 -- .../src/memory_backend/mod.rs | 20 ------ .../src/replica/storage.rs | 23 ------- 6 files changed, 3 insertions(+), 132 deletions(-) diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..f81231a498 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,11 +13,8 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use std::collections::{hash_map::Entry, HashMap}; +use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: 
StorageConfig) -> ZResult> { Ok(Box::::default()) } - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d17e6dfd77..40d022f1ec 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -68,16 +68,6 @@ //! // The properties are the ones passed via a PUT in the admin space for Storage creation. //! Ok(Box::new(MyStorage::new(properties).await?)) //! } -//! -//! fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for incoming data (on PUT operations) -//! None -//! } -//! -//! fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for outgoing data (on GET operations) -//! None -//! } //! } //! //! // Your Storage implementation @@ -135,9 +125,7 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; +use zenoh::prelude::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -210,14 +198,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. 
- fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; @@ -282,49 +262,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before to send the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, -} - -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query. - #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. - #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. - #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply_sample(sample) - } -} diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..90a6ae6250 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,6 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -23,16 +22,12 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: 
Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..91df2f108d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,14 +239,10 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), - in_interceptor, - out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..4e333b8592 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // 
Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 0708dcabd9..35134dfe43 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -60,8 +60,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -85,8 +83,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor: store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -263,13 +259,6 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample - }; - // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -523,12 +512,6 @@ impl StorageService { let sample = Sample::new(key.clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample 
= if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", @@ -561,12 +544,6 @@ impl StorageService { let sample = Sample::new(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", From 7d2abd44b19ed7ba86713f1752990ba344d07235 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 19:20:11 +0100 Subject: [PATCH 039/598] deconstruct sample api used --- .../src/replica/aligner.rs | 3 +-- .../src/replica/storage.rs | 25 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3f672382f1..f00029442f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,11 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::replica::storage::StorageSampleKind; - use super::storage::StorageSample; use super::{Digest, EraType, LogEntry, Snapshotter}; use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; +use crate::replica::storage::StorageSampleKind; use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 95af3c97a2..fbc734d716 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,6 +24,7 
@@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; +use zenoh::sample::SampleFields; use zenoh::sample_builder::{PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; @@ -55,19 +56,23 @@ pub struct StorageSample { impl From for StorageSample { fn from(sample: Sample) -> Self { - let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); - // TODO: add API for disassembly of Sample - let key_expr = sample.key_expr().clone(); - let payload = sample.payload().clone(); - let encoding = sample.encoding().clone(); - let kind = match sample.kind() { - SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), - SampleKind::Delete => StorageSampleKind::Delete, - }; - StorageSample { + let SampleFields { key_expr, timestamp, kind, + payload, + encoding, + .. + } = sample.into(); + StorageSample { + key_expr, + timestamp: timestamp.unwrap_or(new_reception_timestamp()), + kind: match kind { + SampleKind::Put => { + StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)) + } + SampleKind::Delete => StorageSampleKind::Delete, + }, } } } From 2b1071f9b9b06dd10d401969ce5c8678560aea03 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 19:52:30 +0100 Subject: [PATCH 040/598] comment, clippy fix --- zenoh/src/sample.rs | 4 ++-- zenoh/src/sample_builder.rs | 20 ++++++++------------ 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 3ac3087836..acf8536a0e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -67,10 +67,10 @@ pub(crate) trait DataInfoIntoSample { } impl DataInfoIntoSample for DataInfo { - // TODO: this is internal function. + // This function is for internal use only. // Technically it may create invalid sample (e.g. 
a delete sample with a payload and encoding) // The test for it is intentionally not added to avoid inserting extra "if" into hot path. - // This need to be additionally investigated and measured. + // The correctness of the data should be ensured by the caller. #[inline] fn into_sample( self, diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index c0ebf8c9d0..c7ee6e8368 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -144,12 +144,10 @@ pub struct PutSampleBuilder(SampleBuilder); impl From for PutSampleBuilder { fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder { - 0: Sample { - kind: SampleKind::Put, - ..sample_builder.0 - }, - }) + Self(SampleBuilder(Sample { + kind: SampleKind::Put, + ..sample_builder.0 + })) } } @@ -237,12 +235,10 @@ pub struct DeleteSampleBuilder(SampleBuilder); impl From for DeleteSampleBuilder { fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder { - 0: Sample { - kind: SampleKind::Delete, - ..sample_builder.0 - }, - }) + Self(SampleBuilder(Sample { + kind: SampleKind::Delete, + ..sample_builder.0 + })) } } From 3386237bea3e10f80ddb5089617f723577cef5b5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 19:58:46 +0100 Subject: [PATCH 041/598] clippy fix --- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- zenoh/tests/attachments.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index fbc734d716..f2fb0386c3 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -443,7 +443,7 @@ impl StorageService { match self.ovderriding_wild_update(&k, &sample.timestamp).await { Some(overriding_update) => overriding_update.into_sample(k.clone()), - None => sample.clone().into(), + None => sample.clone(), }; 
let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 38d03b0a84..8d26cc0344 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -51,7 +51,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment}; + use zenoh::{prelude::sync::*, sample::Attachment, sample_builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh From f52140aec5909389323cdad70d84b9fc4ba71395 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 20:13:49 +0100 Subject: [PATCH 042/598] zenoh-ext links zenoh with unstable --- zenoh-ext/src/publication_cache.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 78fff32014..a4eff1e932 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -121,10 +121,9 @@ async fn reply_sample(query: &Query, sample: &Sample) { let reply = query .reply_sample(sample.key_expr().clone().into_owned()) .with_timestamp_opt(sample.timestamp().cloned()); - #[cfg(feature = "unstable")] let reply = reply - .with_attachment_opt(sample.attachment()) - .with_source_info(sample.source_info()); + .with_attachment_opt(sample.attachment().cloned()) + .with_source_info(sample.source_info().clone()); if let Err(e) = match sample.kind() { SampleKind::Put => reply.put(sample.payload().clone()).res_async().await, SampleKind::Delete => reply.delete().res_async().await, From a629c765fb86823d3f4fa57d979936c49915221a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 22:06:24 +0100 Subject: [PATCH 043/598] samplefields used --- plugins/zenoh-plugin-example/src/lib.rs | 16 ++++++++-------- zenoh-ext/src/publication_cache.rs | 23 ++++++++++++++++------- 2 files changed, 24 insertions(+), 15 deletions(-) diff 
--git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 9b9dda40de..40f8d69488 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,6 +24,7 @@ use std::sync::{ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::sample::SampleFields; use zenoh::sample_builder::SampleBuilderTrait; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; @@ -175,15 +176,14 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { + let SampleFields { key_expr, timestamp, attachment, source_info, payload, kind, .. } = sample.clone().into(); let reply = query - .reply_sample(sample.key_expr().clone().into_owned()) - .with_timestamp_opt(sample.timestamp().cloned()); - #[cfg(feature = "unstable")] - let reply = reply - .with_attachment_opt(sample.attachment()) - .with_source_info(sample.source_info()); - match sample.kind() { - SampleKind::Put => reply.put(sample.payload().clone()).res().await.unwrap(), + .reply_sample(key_expr) + .with_timestamp_opt(timestamp) + .with_attachment_opt(attachment) + .with_source_info(source_info); + match kind { + SampleKind::Put => reply.put(payload).res().await.unwrap(), SampleKind::Delete => reply.delete().res().await.unwrap(), } } diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index a4eff1e932..8a782a179e 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,6 +20,7 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::sample::SampleFields; use 
zenoh::sample_builder::SampleBuilderTrait; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; @@ -118,14 +119,22 @@ pub struct PublicationCache<'a> { } async fn reply_sample(query: &Query, sample: &Sample) { + let SampleFields { + key_expr, + timestamp, + attachment, + source_info, + payload, + kind, + .. + } = sample.clone().into(); let reply = query - .reply_sample(sample.key_expr().clone().into_owned()) - .with_timestamp_opt(sample.timestamp().cloned()); - let reply = reply - .with_attachment_opt(sample.attachment().cloned()) - .with_source_info(sample.source_info().clone()); - if let Err(e) = match sample.kind() { - SampleKind::Put => reply.put(sample.payload().clone()).res_async().await, + .reply_sample(key_expr) + .with_timestamp_opt(timestamp) + .with_attachment_opt(attachment) + .with_source_info(source_info); + if let Err(e) = match kind { + SampleKind::Put => reply.put(payload).res_async().await, SampleKind::Delete => reply.delete().res_async().await, } { log::warn!("Error replying to query: {}", e); From 1945492ec9a27546e211e5bffac5bd5206cbdcd1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 10:41:14 +0100 Subject: [PATCH 044/598] restored old storage manager code --- .../src/replica/aligner.rs | 22 +- .../src/replica/storage.rs | 343 +++++++----------- 2 files changed, 146 insertions(+), 219 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index f00029442f..a899196e7e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,10 +12,8 @@ // ZettaScale Zenoh Team, // -use super::storage::StorageSample; use super::{Digest, EraType, LogEntry, Snapshotter}; use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; -use crate::replica::storage::StorageSampleKind; use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use 
std::collections::{HashMap, HashSet}; @@ -23,15 +21,17 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::{PutSampleBuilder, PutSampleBuilderTrait, SampleBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; +use zenoh_core::{AsyncResolve, SyncResolve}; pub struct Aligner { session: Arc, digest_key: OwnedKeyExpr, snapshotter: Arc, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, digests_processed: RwLock>, } @@ -40,7 +40,7 @@ impl Aligner { session: Arc, digest_key: OwnedKeyExpr, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, snapshotter: Arc, ) { let aligner = Aligner { @@ -107,11 +107,13 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let sample = StorageSample { - key_expr: key.into(), - timestamp: ts, - kind: StorageSampleKind::Put(value), - }; + let Value { + payload, encoding, .. 
+ } = value; + let sample = PutSampleBuilder::new(key, payload) + .with_encoding(encoding) + .with_timestamp(ts) + .res_sync(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -329,7 +331,7 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res() + .res_async() .await { Ok(replies) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f2fb0386c3..67ce871bb0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,18 +19,19 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::FromStr; +use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample::SampleFields; -use zenoh::sample_builder::{PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{ + PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, +}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_core::SyncResolve; +use zenoh_core::{AsyncResolve, SyncResolve}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -41,152 +42,15 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, 
Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; -#[derive(Clone, Debug)] -pub enum StorageSampleKind { - Put(Value), - Delete, -} - -#[derive(Clone, Debug)] -pub struct StorageSample { - pub key_expr: KeyExpr<'static>, - pub timestamp: Timestamp, - pub kind: StorageSampleKind, -} - -impl From for StorageSample { - fn from(sample: Sample) -> Self { - let SampleFields { - key_expr, - timestamp, - kind, - payload, - encoding, - .. - } = sample.into(); - StorageSample { - key_expr, - timestamp: timestamp.unwrap_or(new_reception_timestamp()), - kind: match kind { - SampleKind::Put => { - StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)) - } - SampleKind::Delete => StorageSampleKind::Delete, - }, - } - } -} - #[derive(Clone)] -enum Update { - Put(StoredData), - Delete(Timestamp), -} - -impl From for Update { - fn from(value: StorageSample) -> Self { - match value.kind { - StorageSampleKind::Put(data) => Update::Put(StoredData { - value: data, - timestamp: value.timestamp, - }), - StorageSampleKind::Delete => Update::Delete(value.timestamp), - } - } -} - -impl Update { - fn timestamp(&self) -> &Timestamp { - match self { - Update::Put(data) => &data.timestamp, - Update::Delete(ts) => ts, - } - } -} - -// implement from String for Update -impl TryFrom for Update { - type Error = zenoh::Error; - - fn try_from(value: String) -> Result { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).with_encoding(result.2); - let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; - if result.0.eq(&(SampleKind::Put).to_string()) { - Ok(Update::Put(StoredData { value, timestamp })) - } else { - Ok(Update::Delete(timestamp)) - } - } -} - -// implement to_string for Update -impl ToString for 
Update { - fn to_string(&self) -> String { - let result = match self { - Update::Put(data) => ( - SampleKind::Put.to_string(), - data.timestamp.to_string(), - data.value.encoding.to_string(), - data.value.payload.slices().collect::>(), - ), - Update::Delete(ts) => ( - SampleKind::Delete.to_string(), - ts.to_string(), - "".to_string(), - vec![], - ), - }; - serde_json::to_string_pretty(&result).unwrap() - } -} - -trait IntoStorageSample { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>; -} - -impl IntoStorageSample for StoredData { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - StorageSample { - key_expr: key_expr.into(), - timestamp: self.timestamp, - kind: StorageSampleKind::Put(self.value), - } - } -} - -impl IntoStorageSample for Update { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - match self { - Update::Put(data) => StorageSample { - key_expr: key_expr.into(), - timestamp: data.timestamp, - kind: StorageSampleKind::Put(data.value), - }, - Update::Delete(ts) => StorageSample { - key_expr: key_expr.into(), - timestamp: ts, - kind: StorageSampleKind::Delete, - }, - } - } +struct Update { + kind: SampleKind, + data: StoredData, } pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -245,11 +109,10 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap + serde_json::from_str(&saved_wc).unwrap(); let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, Update::try_from(data).unwrap()); - // TODO: Remove unwrap + wildcard_updates.insert(&k, construct_update(data)); 
} } } @@ -325,7 +188,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } }, // on query on key_expr @@ -377,7 +240,7 @@ impl StorageService { } else { sample }; - self.process_sample(sample.into()).await; + self.process_sample(sample).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -407,46 +270,61 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: StorageSample) { + async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { - if !self.is_deleted(&k.clone(), &sample.timestamp).await + if !self + .is_deleted(&k.clone(), sample.timestamp().unwrap()) + .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, &sample.timestamp).await)) + && self.is_latest(&k, sample.timestamp().unwrap()).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - &k + k ); // there might be the case that the actual update was outdated due to a wild card update, 
but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = - match self.ovderriding_wild_update(&k, &sample.timestamp).await { - Some(overriding_update) => overriding_update.into_sample(k.clone()), - - None => sample.clone(), - }; + let sample_to_store = match self + .ovderriding_wild_update(&k, sample.timestamp().unwrap()) + .await + { + Some(overriding_update) => { + let Value { + payload, encoding, .. + } = overriding_update.data.value; + PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) + .with_encoding(encoding) + .with_timestamp(overriding_update.data.timestamp) + .res_sync() + } + None => { + PutSampleBuilder::new(KeyExpr::from(k.clone()), sample.payload().clone()) + .with_encoding(sample.encoding().clone()) + .with_timestamp(*sample.timestamp().unwrap()) + .res_sync() + } + }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -454,17 +332,23 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample_to_store.kind { - StorageSampleKind::Put(data) => { + let result = match sample.kind() { + SampleKind::Put => { storage - .put(stripped_key, data, sample_to_store.timestamp) + .put( + stripped_key, + Value::new(sample_to_store.payload().clone()) + .with_encoding(sample_to_store.encoding().clone()), + *sample_to_store.timestamp().unwrap(), + ) .await } - StorageSampleKind::Delete => { + SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp).await; + self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) + .await; storage - .delete(stripped_key, sample_to_store.timestamp) + .delete(stripped_key, *sample_to_store.timestamp().unwrap()) .await } }; @@ -478,7 +362,7 @@ impl StorageService { .as_ref() .unwrap() 
.log_propagation - .send((k.clone(), sample_to_store.timestamp)); + .send((k.clone(), *sample_to_store.timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -509,16 +393,26 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: StorageSample) { + async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr.clone(); + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - wildcards.insert(&key, sample.into()); + let timestamp = *sample.timestamp().unwrap(); + wildcards.insert( + &key, + Update { + kind: sample.kind(), + data: StoredData { + value: Value::from(sample), + timestamp, + }, + }, + ); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, update.to_string()); + serialized_data.insert(k, serialize_update(update)); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -547,36 +441,34 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if let Some(weight) = weight { - if weight.timestamp() > ts { - // if the key matches a wild card update, check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; - } + if weight.is_some() && weight.unwrap().data.timestamp > *ts { + // if the key matches a wild card update, check whether it was saved in storage 
+ // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; } } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = weight.timestamp(); - update = Some(weight.clone()); - } + } + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = &weight.unwrap().data.timestamp; + update = Some(weight.unwrap().clone()); } } } @@ -629,8 +521,12 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { + let Value { + payload, encoding, .. 
+ } = entry.value; if let Err(e) = q - .reply(key.clone(), entry.value.payload) + .reply(key.clone(), payload) + .with_encoding(encoding) .with_timestamp(entry.timestamp) .res_async() .await @@ -776,7 +672,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", @@ -789,6 +685,35 @@ impl StorageService { } } +fn serialize_update(update: &Update) -> String { + let result = ( + update.kind.to_string(), + update.data.timestamp.to_string(), + update.data.value.encoding.to_string(), + update.data.value.payload.slices().collect::>(), + ); + serde_json::to_string_pretty(&result).unwrap() +} + +fn construct_update(data: String) -> Update { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).with_encoding(result.2); + let data = StoredData { + value, + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() + }; + let kind = if result.0.eq(&(SampleKind::Put).to_string()) { + SampleKind::Put + } else { + SampleKind::Delete + }; + Update { kind, data } +} + // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -820,7 +745,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.timestamp(); + let ts = update.data.timestamp; if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); From 65a4d7f8646b159641df015a9a47608d5bae26af Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 10:54:56 +0100 Subject: [PATCH 
045/598] interceptors removed from plugin storage API (#859) --- plugins/zenoh-backend-example/src/lib.rs | 13 +--- plugins/zenoh-backend-traits/src/lib.rs | 68 +------------------ .../src/backends_mgt.rs | 7 -- .../zenoh-plugin-storage-manager/src/lib.rs | 4 -- .../src/memory_backend/mod.rs | 20 ------ .../src/replica/storage.rs | 23 ------- 6 files changed, 3 insertions(+), 132 deletions(-) diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..f81231a498 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,11 +13,8 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use std::collections::{hash_map::Entry, HashMap}; +use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult> { Ok(Box::::default()) } - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d17e6dfd77..40d022f1ec 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -68,16 +68,6 @@ //! // The properties are the ones passed via a PUT in the admin space for Storage creation. //! Ok(Box::new(MyStorage::new(properties).await?)) //! } -//! -//! fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! 
// No interception point for incoming data (on PUT operations) -//! None -//! } -//! -//! fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for outgoing data (on GET operations) -//! None -//! } //! } //! //! // Your Storage implementation @@ -135,9 +125,7 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; +use zenoh::prelude::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -210,14 +198,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; @@ -282,49 +262,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before to send the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, -} - -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query. 
- #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. - #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. - #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply_sample(sample) - } -} diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..90a6ae6250 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,6 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -23,16 +22,12 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs 
b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..91df2f108d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,14 +239,10 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), - in_interceptor, - out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..4e333b8592 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 0708dcabd9..35134dfe43 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ 
-60,8 +60,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -85,8 +83,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor: store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -263,13 +259,6 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample - }; - // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -523,12 +512,6 @@ impl StorageService { let sample = Sample::new(key.clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", @@ -561,12 +544,6 @@ impl StorageService { let sample = Sample::new(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", From 48d8d776986ba31d0030b23250be00da93043b64 Mon Sep 17 
00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 11:49:52 +0100 Subject: [PATCH 046/598] separate qosbuilder trait --- zenoh/src/queryable.rs | 8 +++++++- zenoh/src/sample_builder.rs | 18 +++++++++++++++--- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index f2e00e47c6..68b27526ce 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -21,7 +21,7 @@ use crate::prelude::*; use crate::sample::SourceInfo; use crate::sample_builder::{ DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - SampleBuilder, SampleBuilderTrait, + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -287,7 +287,9 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { ..self } } +} +impl QoSBuilderTrait for ReplySampleBuilder<'_> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), @@ -366,7 +368,9 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { ..self } } +} +impl QoSBuilderTrait for ReplyBuilder<'_> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), @@ -464,7 +468,9 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { ..self } } +} +impl QoSBuilderTrait for ReplyDelBuilder<'_> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index c7ee6e8368..7545646b91 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -28,6 +28,12 @@ use zenoh_core::Resolvable; use zenoh_core::SyncResolve; use zenoh_protocol::core::CongestionControl; +pub trait QoSBuilderTrait { + fn congestion_control(self, congestion_control: 
CongestionControl) -> Self; + fn priority(self, priority: Priority) -> Self; + fn express(self, is_express: bool) -> Self; +} + pub trait SampleBuilderTrait { fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where @@ -40,9 +46,6 @@ pub trait SampleBuilderTrait { fn with_attachment_opt(self, attachment: Option) -> Self; #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self; - fn congestion_control(self, congestion_control: CongestionControl) -> Self; - fn priority(self, priority: Priority) -> Self; - fn express(self, is_express: bool) -> Self; } pub trait PutSampleBuilderTrait: SampleBuilderTrait { @@ -119,6 +122,9 @@ impl SampleBuilderTrait for SampleBuilder { fn with_attachment(self, attachment: Attachment) -> Self { self.with_attachment_opt(Some(attachment)) } +} + +impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(Sample { qos: self.0.qos.with_congestion_control(congestion_control), @@ -201,6 +207,9 @@ impl SampleBuilderTrait for PutSampleBuilder { fn with_attachment_opt(self, attachment: Option) -> Self { Self(self.0.with_attachment_opt(attachment)) } +} + +impl QoSBuilderTrait for PutSampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } @@ -291,6 +300,9 @@ impl SampleBuilderTrait for DeleteSampleBuilder { fn with_attachment_opt(self, attachment: Option) -> Self { Self(self.0.with_attachment_opt(attachment)) } +} + +impl QoSBuilderTrait for DeleteSampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } From 322a4e06b0acdc5dc25be1b6e4abcd0a5c04bf82 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 15:56:17 +0100 Subject: [PATCH 047/598] removed `with_keyexpr` from trait --- zenoh/src/queryable.rs | 30 -------------------------- zenoh/src/sample_builder.rs | 42 
++++++++++++++++++++----------------- 2 files changed, 23 insertions(+), 49 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 68b27526ce..9edb9fb26c 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -240,16 +240,6 @@ impl<'a> ReplySampleBuilder<'a> { } impl SampleBuilderTrait for ReplySampleBuilder<'_> { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self { - sample_builder: self.sample_builder.with_keyexpr(key_expr), - ..self - } - } - fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -321,16 +311,6 @@ pub struct ReplyBuilder<'a> { } impl SampleBuilderTrait for ReplyBuilder<'_> { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self { - sample_builder: self.sample_builder.with_keyexpr(key_expr), - ..self - } - } - fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -421,16 +401,6 @@ pub struct ReplyDelBuilder<'a> { } impl SampleBuilderTrait for ReplyDelBuilder<'_> { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self { - sample_builder: self.sample_builder.with_keyexpr(key_expr), - ..self - } - } - fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 7545646b91..0df98773fc 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -29,15 +29,17 @@ use zenoh_core::SyncResolve; use zenoh_protocol::core::CongestionControl; pub trait QoSBuilderTrait { + /// Change the `congestion_control` to apply when routing the data. fn congestion_control(self, congestion_control: CongestionControl) -> Self; + /// Change the priority of the written data. 
fn priority(self, priority: Priority) -> Self; + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. fn express(self, is_express: bool) -> Self; } pub trait SampleBuilderTrait { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>; fn with_timestamp_opt(self, timestamp: Option) -> Self; fn with_timestamp(self, timestamp: Timestamp) -> Self; #[zenoh_macros::unstable] @@ -78,10 +80,8 @@ impl SampleBuilder { attachment: None, }) } -} - -impl SampleBuilderTrait for SampleBuilder { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + /// Allows to change keyexpr of [`Sample`] + pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { @@ -90,7 +90,9 @@ impl SampleBuilderTrait for SampleBuilder { ..self.0 }) } +} +impl SampleBuilderTrait for SampleBuilder { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(Sample { timestamp, @@ -176,6 +178,13 @@ impl PutSampleBuilder { attachment: None, })) } + /// Allows to change keyexpr of [`Sample`] + pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
pub(crate) fn with_qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) @@ -183,12 +192,6 @@ impl PutSampleBuilder { } impl SampleBuilderTrait for PutSampleBuilder { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.with_keyexpr(key_expr)) - } fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } @@ -269,6 +272,13 @@ impl DeleteSampleBuilder { attachment: None, })) } + /// Allows to change keyexpr of [`Sample`] + pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. pub(crate) fn with_qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) @@ -276,12 +286,6 @@ impl DeleteSampleBuilder { } impl SampleBuilderTrait for DeleteSampleBuilder { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.with_keyexpr(key_expr)) - } fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } From 9515c7d63bec0744d9a1bf2e86b7242ee9121480 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 17:36:36 +0100 Subject: [PATCH 048/598] put, delete builder --- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 1 + zenoh-ext/src/group.rs | 1 + zenoh/src/publication.rs | 284 +++++++++++++----- zenoh/src/sample_builder.rs | 6 + zenoh/src/session.rs | 12 +- zenoh/tests/qos.rs | 1 + zenoh/tests/routing.rs | 1 + zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 1 + 10 files changed, 233 insertions(+), 76 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..48f152e488 100644 --- 
a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,6 +15,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e2718f6579..cc97590636 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,6 +34,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; +use zenoh::sample_builder::PutSampleBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 41007d8b87..973baf271b 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -28,6 +28,7 @@ use std::time::{Duration, Instant}; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh::Session; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 392c0bf8c1..97f485f1e3 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -18,6 +18,9 @@ use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleKind}; +use crate::sample_builder::{ + DeleteSampleBuilderTrait, PutSampleBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, +}; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] @@ -56,7 +59,14 @@ pub use zenoh_protocol::core::CongestionControl; /// .unwrap(); /// # }) 
/// ``` -pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; +pub struct DeleteBuilder<'a, 'b> { + pub(crate) publisher: PublisherBuilder<'a, 'b>, + pub(crate) timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} /// A builder for initializing a [`put`](crate::Session::put) operation. /// @@ -81,36 +91,141 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; pub struct PutBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, pub(crate) payload: Payload, - pub(crate) kind: SampleKind, pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: Option, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } -impl PutBuilder<'_, '_> { - /// Change the `congestion_control` to apply when routing the data. +impl QoSBuilderTrait for PutBuilder<'_, '_> { #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.publisher = self.publisher.congestion_control(congestion_control); - self + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + publisher: self.publisher.congestion_control(congestion_control), + ..self + } } - - /// Change the priority of the written data. #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.publisher = self.publisher.priority(priority); - self + fn priority(self, priority: Priority) -> Self { + Self { + publisher: self.publisher.priority(priority), + ..self + } } + #[inline] + fn express(self, is_express: bool) -> Self { + Self { + publisher: self.publisher.express(is_express), + ..self + } + } +} - /// Change the `express` policy to apply when routing the data. - /// When express is set to `true`, then the message will not be batched. - /// This usually has a positive impact on latency but negative impact on throughput. 
+impl QoSBuilderTrait for DeleteBuilder<'_, '_> { #[inline] - pub fn express(mut self, is_express: bool) -> Self { - self.publisher = self.publisher.express(is_express); - self + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + publisher: self.publisher.congestion_control(congestion_control), + ..self + } + } + #[inline] + fn priority(self, priority: Priority) -> Self { + Self { + publisher: self.publisher.priority(priority), + ..self + } + } + #[inline] + fn express(self, is_express: bool) -> Self { + Self { + publisher: self.publisher.express(is_express), + ..self + } + } +} + +impl SampleBuilderTrait for PutBuilder<'_, '_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } + } + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info: Some(source_info), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl SampleBuilderTrait for DeleteBuilder<'_, '_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } + } + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info: Some(source_info), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), 
+ ..self + } + } +} + +impl PutSampleBuilderTrait for PutBuilder<'_, '_> { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { encoding, ..self } + } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + payload: payload.into(), + ..self + } } +} +impl DeleteSampleBuilderTrait for DeleteBuilder<'_, '_> {} + +impl PutBuilder<'_, '_> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -119,21 +234,15 @@ impl PutBuilder<'_, '_> { self.publisher = self.publisher.allowed_destination(destination); self } +} - /// Set the [`Encoding`] of the written data. - #[inline] - pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self - where - IntoEncoding: Into, - { - self.encoding = encoding.into(); - self - } - +impl DeleteBuilder<'_, '_> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] - /// Attach user-provided data to the written data. 
- pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.publisher = self.publisher.allowed_destination(destination); self } } @@ -142,36 +251,40 @@ impl Resolvable for PutBuilder<'_, '_> { type To = ZResult<()>; } +impl Resolvable for DeleteBuilder<'_, '_> { + type To = ZResult<()>; +} + impl SyncResolve for PutBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let PublisherBuilder { - session, - key_expr, - congestion_control, - priority, - is_express, - destination, - } = self.publisher; - - let publisher = Publisher { - session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher - key_expr: key_expr?, - congestion_control, - priority, - is_express, - destination, - }; - + let publisher = self.publisher.one_time_res_sync()?; resolve_put( &publisher, self.payload, - self.kind, + SampleKind::Put, self.encoding, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl SyncResolve for DeleteBuilder<'_, '_> { + #[inline] + fn res_sync(self) -> ::To { + let publisher = self.publisher.one_time_res_sync()?; + resolve_put( + &publisher, + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, #[cfg(feature = "unstable")] - None, + self.source_info, #[cfg(feature = "unstable")] self.attachment, ) @@ -186,6 +299,14 @@ impl AsyncResolve for PutBuilder<'_, '_> { } } +impl AsyncResolve for DeleteBuilder<'_, '_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + use futures::Sink; use std::convert::TryFrom; use std::convert::TryInto; @@ -293,25 +414,22 @@ impl<'a> Publisher<'a> { /// Change the `congestion_control` to apply when routing the data. 
#[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { + pub fn set_congestion_control(&mut self, congestion_control: CongestionControl) { self.congestion_control = congestion_control; - self } /// Change the priority of the written data. #[inline] - pub fn priority(mut self, priority: Priority) -> Self { + pub fn set_priority(&mut self, priority: Priority) { self.priority = priority; - self } /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { + pub fn set_allowed_destination(&mut self, destination: Locality) { self.destination = destination; - self } /// Consumes the given `Publisher`, returning a thread-safe reference-counting @@ -355,6 +473,7 @@ impl<'a> Publisher<'a> { payload, kind, encoding: Encoding::ZENOH_BYTES, + timestamp: None, #[cfg(feature = "unstable")] source_info: None, #[cfg(feature = "unstable")] @@ -625,6 +744,7 @@ pub struct Publication<'a> { payload: Payload, kind: SampleKind, encoding: Encoding, + timestamp: Option, #[cfg(feature = "unstable")] pub(crate) source_info: Option, #[cfg(feature = "unstable")] @@ -676,6 +796,7 @@ impl SyncResolve for Publication<'_> { self.payload, self.kind, self.encoding, + self.timestamp, #[cfg(feature = "unstable")] self.source_info, #[cfg(feature = "unstable")] @@ -707,6 +828,7 @@ impl<'a> Sink for Publisher<'a> { payload: item.payload, kind: item.kind, encoding: item.encoding, + timestamp: None, #[cfg(feature = "unstable")] source_info: None, #[cfg(feature = "unstable")] @@ -770,30 +892,32 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { } } -impl<'a, 'b> PublisherBuilder<'a, 'b> { +impl QoSBuilderTrait for PublisherBuilder<'_, '_> { /// Change the `congestion_control` to apply when routing the data. 
#[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.congestion_control = congestion_control; - self + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + congestion_control, + ..self + } } /// Change the priority of the written data. #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.priority = priority; - self + fn priority(self, priority: Priority) -> Self { + Self { priority, ..self } } /// Change the `express` policy to apply when routing the data. /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. #[inline] - pub fn express(mut self, is_express: bool) -> Self { - self.is_express = is_express; - self + fn express(self, is_express: bool) -> Self { + Self { is_express, ..self } } +} +impl<'a, 'b> PublisherBuilder<'a, 'b> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
#[zenoh_macros::unstable] @@ -802,6 +926,20 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self.destination = destination; self } + + // internal function for `PutBuilder` and `DeleteBuilder` + fn one_time_res_sync(self) -> ZResult> { + Ok(Publisher { + session: self.session, + #[cfg(feature = "unstable")] + eid: 0, // This is a one shot Publisher + key_expr: self.key_expr?, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }) + } } impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { @@ -874,6 +1012,7 @@ fn resolve_put( payload: Payload, kind: SampleKind, encoding: Encoding, + timestamp: Option, #[cfg(feature = "unstable")] source_info: Option, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { @@ -883,8 +1022,11 @@ fn resolve_put( .as_ref() .unwrap() .clone(); - let timestamp = publisher.session.runtime.new_timestamp(); - + let timestamp = if timestamp.is_none() { + publisher.session.runtime.new_timestamp() + } else { + timestamp + }; if publisher.destination != Locality::SessionLocal { primitives.send_push(Push { wire_expr: publisher.key_expr.to_wire(&publisher.session).to_owned(), diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 0df98773fc..1710cbc85b 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -40,17 +40,23 @@ pub trait QoSBuilderTrait { } pub trait SampleBuilderTrait { + /// Sets of clears timestamp fn with_timestamp_opt(self, timestamp: Option) -> Self; + /// Sets timestamp fn with_timestamp(self, timestamp: Timestamp) -> Self; + /// Attach source information #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self; + /// Attach or remove user-provided data in key-value format #[zenoh_macros::unstable] fn with_attachment_opt(self, attachment: Option) -> Self; + /// Attach user-provided data in key-value format #[zenoh_macros::unstable] fn with_attachment(self, 
attachment: Attachment) -> Self; } pub trait PutSampleBuilderTrait: SampleBuilderTrait { + /// Set the [`Encoding`] fn with_encoding(self, encoding: Encoding) -> Self; fn with_payload(self, payload: IntoPayload) -> Self where diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 89c18ec4a8..e26bdeadaf 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -706,10 +706,12 @@ impl Session { PutBuilder { publisher: self.declare_publisher(key_expr), payload: payload.into(), - kind: SampleKind::Put, + timestamp: None, encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, + #[cfg(feature = "unstable")] + source_info: None, } } @@ -737,13 +739,13 @@ impl Session { TryIntoKeyExpr: TryInto>, >>::Error: Into, { - PutBuilder { + DeleteBuilder { publisher: self.declare_publisher(key_expr), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), + timestamp: None, #[cfg(feature = "unstable")] attachment: None, + #[cfg(feature = "unstable")] + source_info: None, } } /// Query data from the matching queryables in the system. 
diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 1a9df306b2..8dc39423cb 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -15,6 +15,7 @@ use async_std::prelude::FutureExt; use async_std::task; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::{publication::Priority, SessionDeclarations}; use zenoh_core::zasync_executor_init; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 06a8f5da45..123550852e 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -20,6 +20,7 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::{value::Value, Result}; use zenoh_core::zasync_executor_init; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index e3f5e2df63..955ec7a73f 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,6 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 8eb007b0c0..3d1327398d 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -17,6 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); From 4e14cf9e24f5bc7ba2cde3e1494f398d58ed1415 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 26 Mar 2024 01:49:14 +0100 Subject: [PATCH 049/598] build fixes --- examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs 
| 1 + examples/examples/z_pub_thr.rs | 1 + zenoh/tests/attachments.rs | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a57c937e48..b40afc1f53 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -16,6 +16,7 @@ use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index baa5683f62..0003958b5d 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 4354ad2e68..7e7c1ac9b5 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,6 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 8d26cc0344..04ed28b761 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -1,7 +1,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::prelude::sync::*; + use zenoh::{prelude::sync::*, sample_builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh From c7cc5758138ba6e9fea380acf1605cee2d650624 Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Tue, 26 Mar 2024 19:46:29 +0800 Subject: [PATCH 050/598] Support RingBuffer to get the latest sample. 
(#851) * Add RingQueue to support getting the latest sample. Signed-off-by: ChenYing Kuo * Rename RingQueue to RingBuffer. Signed-off-by: ChenYing Kuo * Update examples. Signed-off-by: ChenYing Kuo * Add document. Signed-off-by: ChenYing Kuo * Add test for RingBuffer. Signed-off-by: ChenYing Kuo * Use the correct naming convention (CameCase) Signed-off-by: ChenYing Kuo * Add file header. Signed-off-by: ChenYing Kuo * gename z_pull and update the usage. Signed-off-by: ChenYing Kuo * Use ring instead of cache. Signed-off-by: ChenYing Kuo * Add sleep to wait for the result in pubsub_with_ringbuffer. Signed-off-by: ChenYing Kuo --------- Signed-off-by: ChenYing Kuo --- examples/README.md | 4 +- examples/examples/z_pull.rs | 38 +++++++----------- zenoh/src/handlers.rs | 52 ++++++++++++++++++++++++ zenoh/tests/attachments.rs | 13 ++++++ zenoh/tests/formatters.rs | 13 ++++++ zenoh/tests/handler.rs | 80 +++++++++++++++++++++++++++++++++++++ zenoh/tests/interceptors.rs | 13 ++++++ 7 files changed, 188 insertions(+), 25 deletions(-) create mode 100644 zenoh/tests/handler.rs diff --git a/examples/README.md b/examples/README.md index 8e5b3085ba..dab1c99911 100644 --- a/examples/README.md +++ b/examples/README.md @@ -80,7 +80,7 @@ ### z_pull Declares a key expression and a pull subscriber. - On each pull, the pull subscriber will be notified of the last `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. + On each pull, the pull subscriber will be notified of the last N `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. 
Typical usage: @@ -89,7 +89,7 @@ ``` or ```bash - z_pull -k demo/** + z_pull -k demo/** --size 3 ``` ### z_get diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index d2c9a5380b..9d64b7b758 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,12 +13,8 @@ // use async_std::task::sleep; use clap::Parser; -use std::{ - sync::{Arc, Mutex}, - time::Duration, -}; -use zenoh::{config::Config, prelude::r#async::*}; -use zenoh_collections::RingBuffer; +use std::time::Duration; +use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; use zenoh_examples::CommonArgs; #[async_std::main] @@ -26,31 +22,24 @@ async fn main() { // initiate logging env_logger::init(); - let (config, key_expr, cache, interval) = parse_args(); + let (config, key_expr, size, interval) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); - println!("Creating a local queue keeping the last {cache} elements..."); - let arb = Arc::new(Mutex::new(RingBuffer::new(cache))); - let arb_c = arb.clone(); - println!("Declaring Subscriber on '{key_expr}'..."); - let _subscriber = session + let subscriber = session .declare_subscriber(&key_expr) - .callback(move |sample| { - arb_c.lock().unwrap().push_force(sample); - }) + .with(RingBuffer::new(size)) .res() .await .unwrap(); println!("Pulling data every {:#?} seconds", interval); loop { - let mut res = arb.lock().unwrap().pull(); print!(">> [Subscriber] Pulling "); - match res.take() { - Some(sample) => { + match subscriber.recv() { + Ok(Some(sample)) => { let payload = sample .payload() .deserialize::() @@ -62,10 +51,13 @@ async fn main() { payload, ); } - None => { + Ok(None) => { println!("nothing... sleep for {:#?}", interval); sleep(interval).await; } + Err(e) => { + println!("Pull error: {e}"); + } } } } @@ -75,10 +67,10 @@ struct SubArgs { #[arg(short, long, default_value = "demo/example/**")] /// The Key Expression to subscribe to. 
key: KeyExpr<'static>, - /// The size of the cache. + /// The size of the ringbuffer. #[arg(long, default_value = "3")] - cache: usize, - /// The interval for pulling the cache. + size: usize, + /// The interval for pulling the ringbuffer. #[arg(long, default_value = "5.0")] interval: f32, #[command(flatten)] @@ -88,5 +80,5 @@ struct SubArgs { fn parse_args() -> (Config, KeyExpr<'static>, usize, Duration) { let args = SubArgs::parse(); let interval = Duration::from_secs_f32(args.interval); - (args.common.into(), args.key, args.cache, interval) + (args.common.into(), args.key, args.size, interval) } diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index e5ec3bb0dc..c5d2c6bb90 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -15,6 +15,10 @@ //! Callback handler trait. use crate::API_DATA_RECEPTION_CHANNEL_SIZE; +use std::sync::{Arc, Mutex, Weak}; +use zenoh_collections::RingBuffer as RingBufferInner; +use zenoh_result::ZResult; + /// An alias for `Arc`. pub type Dyn = std::sync::Arc; @@ -88,6 +92,54 @@ impl IntoHandler<'static, T> } } +/// Ring buffer with a limited queue size, which allows users to keep the last N data. +pub struct RingBuffer { + ring: Arc>>, +} + +impl RingBuffer { + /// Initialize the RingBuffer with the capacity size. 
+ pub fn new(capacity: usize) -> Self { + RingBuffer { + ring: Arc::new(Mutex::new(RingBufferInner::new(capacity))), + } + } +} + +pub struct RingBufferHandler { + ring: Weak>>, +} + +impl RingBufferHandler { + pub fn recv(&self) -> ZResult> { + let Some(ring) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + let mut guard = ring.lock().map_err(|e| zerror!("{}", e))?; + Ok(guard.pull()) + } +} + +impl IntoHandler<'static, T> for RingBuffer { + type Handler = RingBufferHandler; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let receiver = RingBufferHandler { + ring: Arc::downgrade(&self.ring), + }; + ( + Dyn::new(move |t| match self.ring.lock() { + Ok(mut g) => { + // Eventually drop the oldest element. + g.push_force(t); + } + Err(e) => log::error!("{}", e), + }), + receiver, + ) + } +} + /// A function that can transform a [`FnMut`]`(T)` to /// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 38d03b0a84..603939bc0e 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #[cfg(feature = "unstable")] #[test] fn pubsub() { diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index ae894e44b6..22600b6cc0 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #[test] fn reuse() { zenoh::kedefine!( diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs new file mode 100644 index 0000000000..c1e912fc75 --- /dev/null +++ b/zenoh/tests/handler.rs @@ -0,0 +1,80 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#[test] +fn pubsub_with_ringbuffer() { + use std::{thread, time::Duration}; + use zenoh::{handlers::RingBuffer, prelude::sync::*}; + + let zenoh = zenoh::open(Config::default()).res().unwrap(); + let sub = zenoh + .declare_subscriber("test/ringbuffer") + .with(RingBuffer::new(3)) + .res() + .unwrap(); + for i in 0..10 { + zenoh + .put("test/ringbuffer", format!("put{i}")) + .res() + .unwrap(); + } + // Should only receive the last three samples ("put7", "put8", "put9") + for i in 7..10 { + assert_eq!( + sub.recv() + .unwrap() + .unwrap() + .payload() + .deserialize::() + .unwrap(), + format!("put{i}") + ); + } + // Wait for the subscriber to get the value + thread::sleep(Duration::from_millis(1000)); +} + +#[test] +fn query_with_ringbuffer() { + use zenoh::{handlers::RingBuffer, prelude::sync::*}; + + let zenoh = zenoh::open(Config::default()).res().unwrap(); + let queryable = zenoh + .declare_queryable("test/ringbuffer_query") + .with(RingBuffer::new(1)) + .res() + .unwrap(); + + let _reply1 = zenoh + .get("test/ringbuffer_query") + .with_value("query1") + .res() + .unwrap(); + let _reply2 = zenoh + .get("test/ringbuffer_query") + .with_value("query2") + .res() + .unwrap(); + + let query = queryable.recv().unwrap().unwrap(); + // Only receive the latest query + assert_eq!( + query + .value() + .unwrap() + .payload + .deserialize::() + .unwrap(), + "query2" + ); +} diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1f502138e4..bf7ec3d7eb 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// use std::sync::{Arc, Mutex}; use zenoh_core::zlock; From fa4b98d0a791d16b9f7c19865aeee4d08ced1766 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 26 Mar 2024 13:33:44 +0100 Subject: [PATCH 051/598] Remove unmantained complete_n feature (#862) * Remove unmantained complete_n feature * Refined QueryableInfo message format * Remove useless bitflag --- commons/zenoh-codec/Cargo.toml | 1 - commons/zenoh-codec/src/network/declare.rs | 53 +++++- commons/zenoh-codec/src/network/request.rs | 5 - commons/zenoh-protocol/Cargo.toml | 1 - commons/zenoh-protocol/src/network/declare.rs | 49 ++--- commons/zenoh-protocol/src/network/request.rs | 4 - zenoh/Cargo.toml | 1 - zenoh/src/lib.rs | 1 - zenoh/src/net/routing/dispatcher/queries.rs | 178 +++++------------- zenoh/src/net/routing/dispatcher/resource.rs | 9 +- zenoh/src/net/routing/hat/client/mod.rs | 6 +- zenoh/src/net/routing/hat/client/queries.rs | 32 ++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 6 +- .../net/routing/hat/linkstate_peer/queries.rs | 49 ++--- zenoh/src/net/routing/hat/mod.rs | 4 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 4 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 32 ++-- zenoh/src/net/routing/hat/router/mod.rs | 8 +- zenoh/src/net/routing/hat/router/queries.rs | 60 +++--- zenoh/src/net/runtime/adminspace.rs | 7 +- zenoh/src/session.rs | 6 +- zenohd/src/main.rs | 2 - 22 files changed, 194 insertions(+), 324 deletions(-) diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 72f507a596..5b7b8de6ed 100644 --- a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -41,7 +41,6 @@ shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory" ] -complete_n = ["zenoh-protocol/complete_n"] [dependencies] log = { workspace = true, optional = true } diff --git 
a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index bcc55ed62b..c81514ab3e 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -511,7 +511,46 @@ where } // QueryableInfo -crate::impl_zextz64!(queryable::ext::QueryableInfo, queryable::ext::Info::ID); +impl WCodec<(&queryable::ext::QueryableInfoType, bool), &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + fn write(self, writer: &mut W, x: (&queryable::ext::QueryableInfoType, bool)) -> Self::Output { + let (x, more) = x; + + let mut flags: u8 = 0; + if x.complete { + flags |= queryable::ext::flag::C; + } + let v: u64 = (flags as u64) | ((x.distance as u64) << 8); + let ext = queryable::ext::QueryableInfo::new(v); + + self.write(&mut *writer, (&ext, more)) + } +} + +impl RCodec<(queryable::ext::QueryableInfoType, bool), &mut R> for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read( + self, + reader: &mut R, + ) -> Result<(queryable::ext::QueryableInfoType, bool), Self::Error> { + let (ext, more): (queryable::ext::QueryableInfo, bool) = self.read(&mut *reader)?; + + let complete = imsg::has_flag(ext.value as u8, queryable::ext::flag::C); + let distance = (ext.value >> 8) as u16; + + Ok(( + queryable::ext::QueryableInfoType { complete, distance }, + more, + )) + } +} // DeclareQueryable impl WCodec<&queryable::DeclareQueryable, &mut W> for Zenoh080 @@ -529,7 +568,7 @@ where // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::DEFAULT) as u8; + let mut n_exts = (ext_info != &queryable::ext::QueryableInfoType::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } @@ -544,9 +583,9 @@ where // Body self.write(&mut *writer, id)?; self.write(&mut *writer, wire_expr)?; - if ext_info != &queryable::ext::QueryableInfo::DEFAULT { + if ext_info != &queryable::ext::QueryableInfoType::DEFAULT { 
n_exts -= 1; - self.write(&mut *writer, (*ext_info, n_exts != 0))?; + self.write(&mut *writer, (ext_info, n_exts != 0))?; } Ok(()) @@ -589,15 +628,15 @@ where }; // Extensions - let mut ext_info = queryable::ext::QueryableInfo::DEFAULT; + let mut ext_info = queryable::ext::QueryableInfoType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); match iext::eid(ext) { - queryable::ext::Info::ID => { - let (i, ext): (queryable::ext::QueryableInfo, bool) = + queryable::ext::QueryableInfo::ID => { + let (i, ext): (queryable::ext::QueryableInfoType, bool) = eodec.read(&mut *reader)?; ext_info = i; has_ext = ext; diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 364c1af3d0..6173840d7e 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -43,8 +43,6 @@ where ext::TargetType::BestMatching => 0, ext::TargetType::All => 1, ext::TargetType::AllComplete => 2, - #[cfg(feature = "complete_n")] - ext::TargetType::Complete(n) => 3 + *n, }; let ext = ext::Target::new(v); self.write(&mut *writer, (&ext, more)) @@ -63,9 +61,6 @@ where 0 => ext::TargetType::BestMatching, 1 => ext::TargetType::All, 2 => ext::TargetType::AllComplete, - #[cfg(feature = "complete_n")] - n => ext::TargetType::Complete(n - 3), - #[cfg(not(feature = "complete_n"))] _ => return Err(DidntRead), }; Ok((rt, more)) diff --git a/commons/zenoh-protocol/Cargo.toml b/commons/zenoh-protocol/Cargo.toml index 93c92ee33f..9d7e35d690 100644 --- a/commons/zenoh-protocol/Cargo.toml +++ b/commons/zenoh-protocol/Cargo.toml @@ -36,7 +36,6 @@ std = [ test = ["rand", "zenoh-buffers/test"] shared-memory = ["std", "zenoh-buffers/shared-memory"] stats = [] -complete_n = [] [dependencies] const_format = { workspace = true } diff --git a/commons/zenoh-protocol/src/network/declare.rs 
b/commons/zenoh-protocol/src/network/declare.rs index 187fa87662..d41d8bf67f 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -464,31 +464,35 @@ pub mod queryable { pub struct DeclareQueryable { pub id: QueryableId, pub wire_expr: WireExpr<'static>, - pub ext_info: ext::QueryableInfo, + pub ext_info: ext::QueryableInfoType, } pub mod ext { use super::*; - pub type Info = zextz64!(0x01, false); + pub type QueryableInfo = zextz64!(0x01, false); + pub mod flag { + pub const C: u8 = 1; // 0x01 Complete if C==1 then the queryable is complete + } + /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// ~ complete_n ~ + /// |x|x|x|x|x|x|x|C| /// +---------------+ - /// ~ distance ~ + /// ~ distance ~ /// +---------------+ #[derive(Debug, Clone, Copy, PartialEq, Eq)] - pub struct QueryableInfo { - pub complete: u8, // Default 0: incomplete // @TODO: maybe a bitflag - pub distance: u32, // Default 0: no distance + pub struct QueryableInfoType { + pub complete: bool, // Default false: incomplete + pub distance: u16, // Default 0: no distance } - impl QueryableInfo { + impl QueryableInfoType { pub const DEFAULT: Self = Self { - complete: 0, + complete: false, distance: 0, }; @@ -496,35 +500,18 @@ pub mod queryable { pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); - let complete: u8 = rng.gen(); - let distance: u32 = rng.gen(); + let complete: bool = rng.gen_bool(0.5); + let distance: u16 = rng.gen(); Self { complete, distance } } } - impl Default for QueryableInfo { + impl Default for QueryableInfoType { fn default() -> Self { Self::DEFAULT } } - - impl From for QueryableInfo { - fn from(ext: Info) -> Self { - let complete = ext.value as u8; - let distance = (ext.value >> 8) as u32; - - Self { complete, distance } - } - } - - impl From for Info { - fn from(ext: QueryableInfo) -> Self { - let mut v: u64 = ext.complete as u64; - v |= (ext.distance as 
u64) << 8; - Info::new(v) - } - } } impl DeclareQueryable { @@ -535,7 +522,7 @@ pub mod queryable { let id: QueryableId = rng.gen(); let wire_expr = WireExpr::rand(); - let ext_info = ext::QueryableInfo::rand(); + let ext_info = ext::QueryableInfoType::rand(); Self { id, @@ -553,7 +540,7 @@ pub mod queryable { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| U_QBL | + /// |Z|0_2| U_QBL | /// +---------------+ /// ~ qbls_id:z32 ~ /// +---------------+ diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index aba6bb057a..ff978744e8 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -93,8 +93,6 @@ pub mod ext { BestMatching, All, AllComplete, - #[cfg(feature = "complete_n")] - Complete(u64), } impl TargetType { @@ -109,8 +107,6 @@ pub mod ext { TargetType::All, TargetType::AllComplete, TargetType::BestMatching, - #[cfg(feature = "complete_n")] - TargetType::Complete(rng.gen()), ] .choose(&mut rng) .unwrap() diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index e6f7a4d9aa..1333ea6a57 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -31,7 +31,6 @@ maintenance = { status = "actively-developed" } [features] auth_pubkey = ["zenoh-transport/auth_pubkey"] auth_usrpwd = ["zenoh-transport/auth_usrpwd"] -complete_n = ["zenoh-codec/complete_n"] shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory", diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index eb1ba1bcd1..ed2f01f180 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -106,7 +106,6 @@ pub const FEATURES: &str = concat_enabled_features!( features = [ "auth_pubkey", "auth_usrpwd", - "complete_n", "shared-memory", "stats", "transport_multilink", diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 04262e555d..753a4003e1 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ 
b/zenoh/src/net/routing/dispatcher/queries.rs @@ -24,7 +24,7 @@ use zenoh_config::WhatAmI; use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ - declare::{ext, queryable::ext::QueryableInfo, QueryableId}, + declare::{ext, queryable::ext::QueryableInfoType, QueryableId}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, @@ -44,7 +44,7 @@ pub(crate) fn declare_queryable( face: &mut Arc, id: QueryableId, expr: &WireExpr, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ) { let rtables = zread!(tables.tables); @@ -287,22 +287,11 @@ fn compute_final_route( .hat_code .egress_filter(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid) + }); } } route @@ -315,46 +304,11 @@ fn compute_final_route( .hat_code .egress_filter(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, 
query.clone()); - (direction, qid) - }); - } - } - } - route - } - #[cfg(feature = "complete_n")] - TargetType::Complete(n) => { - let mut route = HashMap::new(); - let mut remaining = *n; - for qabl in qabls.iter() { - if qabl.complete > 0 - && tables - .hat_code - .egress_filter(tables, src_face, &qabl.direction.0, expr) - { - let nb = std::cmp::min(qabl.complete, remaining); route.entry(qabl.direction.0.id).or_insert_with(|| { let mut direction = qabl.direction.clone(); let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) + (direction, qid) }); - remaining -= nb; - if remaining == 0 { - break; - } } } route @@ -365,18 +319,11 @@ fn compute_final_route( .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0) { let mut route = HashMap::new(); - #[cfg(feature = "complete_n")] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid, *target)); - } - #[cfg(not(feature = "complete_n"))] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid)); - } + + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query); + route.insert(direction.0.id, (direction, qid)); + route } else { compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query) @@ -624,78 +571,37 @@ pub fn route_query( expr.full_expr().to_string(), )); } else { - // let timer = tables.timer.clone(); - // let timeout = tables.queries_default_timeout; - #[cfg(feature = "complete_n")] - { - for ((outface, key_expr, context), qid, t) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - 
inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(RoutingContext::with_expr( - Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: *t, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }, - expr.full_expr().to_string(), - )); + for ((outface, key_expr, context), qid) in route.values() { + // timer.add(TimedEvent::once( + // Instant::now() + timeout, + // QueryCleanup { + // tables: tables_ref.clone(), + // face: Arc::downgrade(&outface), + // *qid, + // }, + // )); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(outface, tx, user, body) + } else { + inc_req_stats!(outface, tx, admin, body) } - } - #[cfg(not(feature = "complete_n"))] - { - for ((outface, key_expr, context), qid) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(RoutingContext::with_expr( - Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: target, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }, - expr.full_expr().to_string(), - )); - } + log::trace!("Propagate query {}:{} to {}", face, qid, outface); + outface.primitives.send_request(RoutingContext::with_expr( + Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::REQUEST, + ext_tstamp: None, + 
ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_target: target, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }, + expr.full_expr().to_string(), + )); } } } else { diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 3e35db14b6..0450dab38a 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -21,14 +21,12 @@ use std::convert::TryInto; use std::hash::{Hash, Hasher}; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; -#[cfg(feature = "complete_n")] -use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ declare::{ - ext, queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, Declare, + ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, }, Mapping, @@ -40,9 +38,6 @@ pub(crate) type NodeId = u16; pub(crate) type Direction = (Arc, WireExpr<'static>, NodeId); pub(crate) type Route = HashMap; -#[cfg(feature = "complete_n")] -pub(crate) type QueryRoute = HashMap; -#[cfg(not(feature = "complete_n"))] pub(crate) type QueryRoute = HashMap; pub(crate) struct QueryTargetQabl { pub(crate) direction: Direction, @@ -56,7 +51,7 @@ pub(crate) struct SessionContext { pub(crate) local_expr_id: Option, pub(crate) remote_expr_id: Option, pub(crate) subs: Option, - pub(crate) qabl: Option, + pub(crate) qabl: Option, pub(crate) in_interceptor_cache: Option>, pub(crate) e_interceptor_cache: Option>, } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index a9908f5f58..8b7031152a 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -42,7 +42,9 @@ use std::{ sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::WhatAmI; -use 
zenoh_protocol::network::declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}; +use zenoh_protocol::network::declare::{ + queryable::ext::QueryableInfoType, QueryableId, SubscriberId, +}; use zenoh_protocol::network::Oam; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -284,7 +286,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 81e5ba52d9..2ac3f1b993 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -32,29 +32,24 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + 
_tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -71,10 +66,7 @@ fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } fn propagate_simple_queryable( @@ -121,7 +113,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -147,7 +139,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); @@ -263,7 +255,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, _node_id: NodeId, ) { declare_client_queryable(tables, face, id, res, qabl_info); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 3c4e2091f0..71c483e7bd 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -51,7 +51,7 @@ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, Ze use zenoh_protocol::{ common::ZExtBody, network::{ - declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, oam::id::OAM_LINKSTATE, Oam, }, @@ -449,7 +449,7 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, peer_subs: HashSet, - peer_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -467,7 +467,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: 
HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index fa553e5121..9fba744a9c 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -35,29 +35,20 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -70,13 +61,14 @@ fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + tables: &Tables, + res: 
&Arc, + face: &Arc, +) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) .peer_qabls @@ -112,10 +104,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } #[inline] @@ -124,7 +113,7 @@ fn send_sourced_queryable_to_net_childs( net: &Network, childs: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -198,7 +187,7 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, source: &ZenohId, ) { @@ -236,7 +225,7 @@ fn register_peer_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); @@ -261,7 +250,7 @@ fn declare_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let face = Some(face); @@ -273,7 +262,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -299,7 +288,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); @@ -599,7 +588,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -645,7 +634,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: 
&QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ) { if face.whatami != WhatAmI::Client { diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index d9feb687f2..70e94ac176 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -32,7 +32,7 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare::{ - queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, QueryableId, + queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, SubscriberId, }, Oam, @@ -154,7 +154,7 @@ pub(crate) trait HatQueriesTrait { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ); fn undeclare_queryable( diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 59b39d4284..1d87c2eb23 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -53,7 +53,7 @@ use zenoh_protocol::network::{ }; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, + network::{declare::queryable::ext::QueryableInfoType, oam::id::OAM_LINKSTATE}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -360,7 +360,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index caea6fe6b8..38f77bec45 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -32,29 +32,24 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, 
network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + _tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -71,10 +66,7 @@ fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } fn propagate_simple_queryable( @@ -121,7 +113,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -147,7 +139,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); @@ -263,7 +255,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + 
qabl_info: &QueryableInfoType, _node_id: NodeId, ) { declare_client_queryable(tables, face, id, res, qabl_info); diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 47cf02db46..27db136eda 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -56,7 +56,7 @@ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, Ze use zenoh_protocol::{ common::ZExtBody, network::{ - declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, oam::id::OAM_LINKSTATE, Oam, }, @@ -748,8 +748,8 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, + router_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -768,7 +768,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index aca6f71b3e..61abaa7c55 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -35,29 +35,20 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; 
+fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { let info = if hat!(tables).full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|_| { res_hat!(res) @@ -89,13 +80,10 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -125,13 +113,14 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { let mut info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -183,10 +172,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } #[inline] @@ -195,7 +181,7 @@ fn send_sourced_queryable_to_net_childs( net: &Network, childs: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: 
&QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -279,7 +265,7 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, source: &ZenohId, net_type: WhatAmI, @@ -318,7 +304,7 @@ fn register_router_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, router: ZenohId, ) { let current_info = res_hat!(res).router_qabls.get(&router); @@ -356,7 +342,7 @@ fn declare_router_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, router: ZenohId, ) { register_router_queryable(tables, Some(face), res, qabl_info, router); @@ -366,7 +352,7 @@ fn register_peer_queryable( tables: &mut Tables, face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); @@ -386,7 +372,7 @@ fn declare_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let mut face = Some(face); @@ -401,7 +387,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -427,7 +413,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); @@ -975,7 +961,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -1021,7 +1007,7 @@ impl HatQueriesTrait for HatCode { 
face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ) { match face.whatami { diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 29106cb89d..343199e367 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -39,7 +39,7 @@ use zenoh_protocol::{ ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, }, network::{ - declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, ResponseFinal, }, @@ -283,10 +283,7 @@ impl AdminSpace { body: DeclareBody::DeclareQueryable(DeclareQueryable { id: runtime.next_id(), wire_expr: [&root_key, "/**"].concat().into(), - ext_info: QueryableInfo { - complete: 0, - distance: 0, - }, + ext_info: QueryableInfoType::DEFAULT, }), }); diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 496c6879ce..58d315c848 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -71,7 +71,7 @@ use zenoh_protocol::{ }, network::{ declare::{ - self, common::ext::WireExprType, queryable::ext::QueryableInfo, + self, common::ext::WireExprType, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, @@ -1177,8 +1177,8 @@ impl Session { if origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); - let qabl_info = QueryableInfo { - complete: if complete { 1 } else { 0 }, + let qabl_info = QueryableInfoType { + complete, distance: 0, }; primitives.send_declare(Declare { diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index b0d29ea89b..af7ec3bf43 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -371,7 +371,6 @@ fn 
test_default_features() { concat!( " zenoh/auth_pubkey", " zenoh/auth_usrpwd", - // " zenoh/complete_n", // " zenoh/shared-memory", // " zenoh/stats", " zenoh/transport_multilink", @@ -397,7 +396,6 @@ fn test_no_default_features() { concat!( // " zenoh/auth_pubkey", // " zenoh/auth_usrpwd", - // " zenoh/complete_n", // " zenoh/shared-memory", // " zenoh/stats", // " zenoh/transport_multilink", From 8cd60d0afaeec5ab0468e899db300302f65c62e6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 00:47:48 +0100 Subject: [PATCH 052/598] Publication updated --- examples/examples/z_pub.rs | 1 + zenoh/src/publication.rs | 254 +++++++++++++++++++++++++------------ zenoh/src/queryable.rs | 13 +- zenoh/src/sample.rs | 18 ++- zenoh/src/session.rs | 5 +- 5 files changed, 196 insertions(+), 95 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7ba17745b5..d22d4d55ee 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,6 +16,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh_examples::CommonArgs; #[async_std::main] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 97f485f1e3..1e1c0cb509 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -17,7 +17,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; -use crate::sample::{DataInfo, QoS, Sample, SampleKind}; +use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::sample_builder::{ DeleteSampleBuilderTrait, PutSampleBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, }; @@ -33,8 +33,6 @@ use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; -#[zenoh_macros::unstable] -use 
zenoh_protocol::zenoh::ext::SourceInfoType; use zenoh_protocol::zenoh::Del; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::zenoh::Put; @@ -63,7 +61,7 @@ pub struct DeleteBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] - pub(crate) source_info: Option, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -94,7 +92,7 @@ pub struct PutBuilder<'a, 'b> { pub(crate) encoding: Encoding, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] - pub(crate) source_info: Option, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -160,7 +158,7 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { - source_info: Some(source_info), + source_info, ..self } } @@ -190,7 +188,7 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { - source_info: Some(source_info), + source_info, ..self } } @@ -258,7 +256,7 @@ impl Resolvable for DeleteBuilder<'_, '_> { impl SyncResolve for PutBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let publisher = self.publisher.one_time_res_sync()?; + let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, self.payload, @@ -276,7 +274,7 @@ impl SyncResolve for PutBuilder<'_, '_> { impl SyncResolve for DeleteBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let publisher = self.publisher.one_time_res_sync()?; + let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, Payload::empty(), @@ -467,20 +465,6 @@ impl<'a> Publisher<'a> { std::sync::Arc::new(self) } - fn _write(&self, kind: SampleKind, payload: Payload) -> Publication { - Publication { - publisher: self, - payload, - kind, - encoding: 
Encoding::ZENOH_BYTES, - timestamp: None, - #[cfg(feature = "unstable")] - source_info: None, - #[cfg(feature = "unstable")] - attachment: None, - } - } - /// Put data. /// /// # Examples @@ -494,11 +478,20 @@ impl<'a> Publisher<'a> { /// # }) /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> Publication + pub fn put(&self, payload: IntoPayload) -> PutPublication where IntoPayload: Into, { - self._write(SampleKind::Put, payload.into()) + PutPublication { + publisher: self, + payload: payload.into(), + encoding: Encoding::ZENOH_BYTES, + timestamp: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } } /// Delete data. @@ -513,8 +506,15 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # }) /// ``` - pub fn delete(&self) -> Publication { - self._write(SampleKind::Delete, Payload::empty()) + pub fn delete(&self) -> DeletePublication { + DeletePublication { + publisher: self, + timestamp: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } } /// Return the [`MatchingStatus`] of the publisher. @@ -737,64 +737,129 @@ impl Drop for Publisher<'_> { } /// A [`Resolvable`] returned by [`Publisher::put()`](Publisher::put), -/// [`Publisher::delete()`](Publisher::delete) and [`Publisher::write()`](Publisher::write). 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct Publication<'a> { +pub struct PutPublication<'a> { publisher: &'a Publisher<'a>, payload: Payload, - kind: SampleKind, encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] - pub(crate) source_info: Option, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } -impl<'a> Publication<'a> { - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; - self +/// A [`Resolvable`] returned by [`Publisher::delete()`](Publisher::delete) +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +pub struct DeletePublication<'a> { + publisher: &'a Publisher<'a>, + timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl SampleBuilderTrait for PutPublication<'_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } } - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } } - /// Send data with the given [`SourceInfo`]. 
- /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.put("Value").with_source_info(SourceInfo { - /// source_id: Some(publisher.id()), - /// source_sn: Some(0), - /// }).res().await.unwrap(); - /// # }) - /// ``` - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = Some(source_info); - self + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl PutSampleBuilderTrait for PutPublication<'_> { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { encoding, ..self } } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + payload: payload.into(), + ..self + } + } +} + +impl SampleBuilderTrait for DeletePublication<'_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } + } + + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl 
DeleteSampleBuilderTrait for DeletePublication<'_> {} + +impl Resolvable for PutPublication<'_> { + type To = ZResult<()>; } -impl Resolvable for Publication<'_> { +impl Resolvable for DeletePublication<'_> { type To = ZResult<()>; } -impl SyncResolve for Publication<'_> { +impl SyncResolve for PutPublication<'_> { fn res_sync(self) -> ::To { resolve_put( self.publisher, self.payload, - self.kind, + SampleKind::Put, self.encoding, self.timestamp, #[cfg(feature = "unstable")] @@ -805,7 +870,31 @@ impl SyncResolve for Publication<'_> { } } -impl AsyncResolve for Publication<'_> { +impl SyncResolve for DeletePublication<'_> { + fn res_sync(self) -> ::To { + resolve_put( + self.publisher, + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl AsyncResolve for PutPublication<'_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl AsyncResolve for DeletePublication<'_> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -823,18 +912,25 @@ impl<'a> Sink for Publisher<'a> { #[inline] fn start_send(self: Pin<&mut Self>, item: Sample) -> Result<(), Self::Error> { - Publication { - publisher: &self, - payload: item.payload, - kind: item.kind, - encoding: item.encoding, - timestamp: None, + let SampleFields { + payload, + kind, + encoding, + #[cfg(feature = "unstable")] + attachment, + .. 
+ } = item.into(); + resolve_put( + &self, + payload, + kind, + encoding, + None, #[cfg(feature = "unstable")] - source_info: None, + SourceInfo::empty(), #[cfg(feature = "unstable")] - attachment: item.attachment, - } - .res_sync() + attachment, + ) } #[inline] @@ -928,7 +1024,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { } // internal function for `PutBuilder` and `DeleteBuilder` - fn one_time_res_sync(self) -> ZResult> { + fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, #[cfg(feature = "unstable")] @@ -1013,7 +1109,7 @@ fn resolve_put( kind: SampleKind, encoding: Encoding, timestamp: Option, - #[cfg(feature = "unstable")] source_info: Option, + #[cfg(feature = "unstable")] source_info: SourceInfo, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -1051,10 +1147,7 @@ fn resolve_put( timestamp, encoding: encoding.clone().into(), #[cfg(feature = "unstable")] - ext_sinfo: source_info.map(|s| SourceInfoType { - id: s.source_id.unwrap_or_default(), - sn: s.source_sn.unwrap_or_default() as u32, - }), + ext_sinfo: source_info.into(), #[cfg(not(feature = "unstable"))] ext_sinfo: None, #[cfg(feature = "shared-memory")] @@ -1076,10 +1169,7 @@ fn resolve_put( PushBody::Del(Del { timestamp, #[cfg(feature = "unstable")] - ext_sinfo: source_info.map(|s| SourceInfoType { - id: s.source_id.unwrap_or_default(), - sn: s.source_sn.unwrap_or_default() as u32, - }), + ext_sinfo: source_info.into(), #[cfg(not(feature = "unstable"))] ext_sinfo: None, ext_attachment, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 9edb9fb26c..6f71cd7fb7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -518,17 +518,10 @@ impl Query { { bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) } - #[allow(unused_mut)] // will be unused if 
feature = "unstable" is not enabled - let mut ext_sinfo = None; + #[cfg(not(feature = "unstable"))] + let ext_sinfo = None; #[cfg(feature = "unstable")] - { - if sample.source_info.source_id.is_some() || sample.source_info.source_sn.is_some() { - ext_sinfo = Some(zenoh::put::ext::SourceInfoType { - id: sample.source_info.source_id.unwrap_or_default(), - sn: sample.source_info.source_sn.unwrap_or_default() as u32, - }) - } - } + let ext_sinfo = sample.source_info.into(); self.inner.primitives.send_response(Response { rid: self.inner.qid, wire_expr: WireExpr { diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index acf8536a0e..1998f3e844 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -22,7 +22,7 @@ use crate::Priority; use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; +use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType, zenoh}; pub type SourceSn = u64; @@ -163,6 +163,22 @@ impl SourceInfo { source_sn: None, } } + pub(crate) fn is_empty(&self) -> bool { + self.source_id.is_none() && self.source_sn.is_none() + } +} + +impl From for Option { + fn from(source_info: SourceInfo) -> Option { + if source_info.is_empty() { + None + } else { + Some(zenoh::put::ext::SourceInfoType { + id: source_info.source_id.unwrap_or_default(), + sn: source_info.source_sn.unwrap_or_default() as u32, + }) + } + } } #[zenoh_macros::unstable] diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index db81888018..a2371d1bfa 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -41,6 +41,7 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Selector; +use crate::SourceInfo; use crate::Value; use async_std::task; use log::{error, trace, warn}; @@ -711,7 +712,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] - source_info: None, + source_info: 
SourceInfo::empty(), } } @@ -745,7 +746,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] - source_info: None, + source_info: SourceInfo::empty(), } } /// Query data from the matching queryables in the system. From 00e0a59a71804fa54e4e2cc6d92a35731a079654 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 00:59:12 +0100 Subject: [PATCH 053/598] build fix --- examples/examples/z_pub_shm_thr.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 7c6f3cbbd3..5230ea3ce6 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; From e601271c25becb47c0f14fbbf0dccce2dfdb81f5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 02:26:16 +0100 Subject: [PATCH 054/598] reply_sample restored --- plugins/zenoh-plugin-example/src/lib.rs | 13 +----- .../src/replica/storage.rs | 42 +++++++++++-------- zenoh-ext/src/publication_cache.rs | 33 +++------------ zenoh-ext/src/querying_subscriber.rs | 9 +--- zenoh/src/queryable.rs | 38 ++++++++++++----- zenoh/src/sample_builder.rs | 6 +-- 6 files changed, 64 insertions(+), 77 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 40f8d69488..04f49b4739 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,8 +24,6 @@ use std::sync::{ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; -use zenoh::sample::SampleFields; -use zenoh::sample_builder::SampleBuilderTrait; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, 
plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; @@ -176,16 +174,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - let SampleFields { key_expr, timestamp, attachment, source_info, payload, kind, .. } = sample.clone().into(); - let reply = query - .reply_sample(key_expr) - .with_timestamp_opt(timestamp) - .with_attachment_opt(attachment) - .with_source_info(source_info); - match kind { - SampleKind::Put => reply.put(payload).res().await.unwrap(), - SampleKind::Delete => reply.delete().res().await.unwrap(), - } + query.reply_sample(sample.clone()).res().await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 67ce871bb0..aed13bbbf1 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,14 +21,17 @@ use futures::select; use std::collections::{HashMap, HashSet}; use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; +use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::prelude::r#async::*; -use zenoh::query::ConsolidationMode; +use zenoh::key_expr::KeyExpr; +use zenoh::query::{ConsolidationMode, QueryTarget}; +use zenoh::sample::{Sample, SampleKind}; use zenoh::sample_builder::{ - PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, + DeleteSampleBuilder, PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, }; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::{Result as ZResult, Session}; +use zenoh::value::Value; +use zenoh::{Result as ZResult, Session, SessionDeclarations}; use zenoh_backend_traits::config::{GarbageCollectionConfig, 
StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_core::{AsyncResolve, SyncResolve}; @@ -235,11 +238,8 @@ impl StorageService { continue; } }; - let sample = if sample.timestamp().is_none() { - SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() - } else { - sample - }; + let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let sample = SampleBuilder::from(sample).with_timestamp(timestamp).res_sync(); self.process_sample(sample).await; }, // on query on key_expr @@ -307,21 +307,27 @@ impl StorageService { .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { - Some(overriding_update) => { + Some(Update { + kind: SampleKind::Put, + data, + }) => { let Value { payload, encoding, .. - } = overriding_update.data.value; + } = data.value; PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) - .with_timestamp(overriding_update.data.timestamp) - .res_sync() - } - None => { - PutSampleBuilder::new(KeyExpr::from(k.clone()), sample.payload().clone()) - .with_encoding(sample.encoding().clone()) - .with_timestamp(*sample.timestamp().unwrap()) + .with_timestamp(data.timestamp) .res_sync() } + Some(Update { + kind: SampleKind::Delete, + data, + }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) + .with_timestamp(data.timestamp) + .res_sync(), + None => SampleBuilder::from(sample.clone()) + .keyexpr(k.clone()) + .res_sync(), }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 8a782a179e..85cb96cce2 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,8 +20,6 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; -use zenoh::sample::SampleFields; -use 
zenoh::sample_builder::SampleBuilderTrait; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; @@ -118,29 +116,6 @@ pub struct PublicationCache<'a> { _stoptx: Sender, } -async fn reply_sample(query: &Query, sample: &Sample) { - let SampleFields { - key_expr, - timestamp, - attachment, - source_info, - payload, - kind, - .. - } = sample.clone().into(); - let reply = query - .reply_sample(key_expr) - .with_timestamp_opt(timestamp) - .with_attachment_opt(attachment) - .with_source_info(source_info); - if let Err(e) = match kind { - SampleKind::Put => reply.put(payload).res_async().await, - SampleKind::Delete => reply.delete().res_async().await, - } { - log::warn!("Error replying to query: {}", e); - } -} - impl<'a> PublicationCache<'a> { fn new(conf: PublicationCacheBuilder<'a, '_, '_>) -> ZResult> { let key_expr = conf.pub_key_expr?; @@ -237,7 +212,9 @@ impl<'a> PublicationCache<'a> { continue; } } - reply_sample(&query, sample).await; + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + log::warn!("Error replying to query: {}", e); + } } } } else { @@ -249,7 +226,9 @@ impl<'a> PublicationCache<'a> { continue; } } - reply_sample(&query, sample).await; + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + log::warn!("Error replying to query: {}", e); + } } } } diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index eb6d6e9516..5c302840b8 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -664,13 +664,8 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { log::trace!("Sample received while fetch in progress: push it to merge_queue"); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. 
- let s = if s.timestamp().is_none() { - SampleBuilder::from(s) - .with_timestamp(new_reception_timestamp()) - .res_sync() - } else { - s - }; + let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let s = SampleBuilder::from(s).with_timestamp(timestamp).res_sync(); state.merge_queue.push(s); } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6f71cd7fb7..14e9d09068 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -106,20 +106,19 @@ impl Query { self.inner.attachment.as_ref() } - /// Sends a reply or delete reply to this Query + /// Sends a reply in the form of [`Sample`] to this Query. /// - /// This function is useful when resending the samples which can be of [`SampleKind::Put`] or [`SampleKind::Delete`] - /// It allows to build the reply with same common parameters, like timestamp, attachment, source_info, etc. - /// and only on final step to choose the kind of reply by calling [`ReplySampleBuilder::put`] or [`ReplySampleBuilder::delete`] methods. + /// By default, queries only accept replies whose key expression intersects with the query's. + /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), + /// replying on a disjoint key expression will result in an error when resolving the reply. + /// This api is for internal use only. 
#[inline(always)] - pub fn reply_sample(&self, key_expr: IntoKeyExpr) -> ReplySampleBuilder - where - IntoKeyExpr: Into>, - { - let sample_builder = SampleBuilder::new(key_expr); + #[cfg(feature = "unstable")] + #[doc(hidden)] + pub fn reply_sample(&self, sample: Sample) -> ReplySampleBuilder<'_> { ReplySampleBuilder { query: self, - sample_builder, + sample_builder: sample.into(), } } @@ -302,6 +301,25 @@ impl QoSBuilderTrait for ReplySampleBuilder<'_> { } } +impl Resolvable for ReplySampleBuilder<'_> { + type To = ZResult<()>; +} + +impl SyncResolve for ReplySampleBuilder<'_> { + fn res_sync(self) -> ::To { + let sample = self.sample_builder.res_sync(); + self.query._reply_sample(sample) + } +} + +impl AsyncResolve for ReplySampleBuilder<'_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + /// A builder returned by [`Query::reply()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 1710cbc85b..7e38e84afd 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -87,7 +87,7 @@ impl SampleBuilder { }) } /// Allows to change keyexpr of [`Sample`] - pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { @@ -189,7 +189,7 @@ impl PutSampleBuilder { where IntoKeyExpr: Into>, { - Self(self.0.with_keyexpr(key_expr)) + Self(self.0.keyexpr(key_expr)) } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
pub(crate) fn with_qos(self, qos: QoS) -> Self { @@ -283,7 +283,7 @@ impl DeleteSampleBuilder { where IntoKeyExpr: Into>, { - Self(self.0.with_keyexpr(key_expr)) + Self(self.0.keyexpr(key_expr)) } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. pub(crate) fn with_qos(self, qos: QoS) -> Self { From ea4020ddd3bba3402bba7a4fe172cc2518333066 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 11:29:05 +0100 Subject: [PATCH 055/598] build fixes --- zenoh/src/publication.rs | 2 ++ zenoh/src/session.rs | 1 + 2 files changed, 3 insertions(+) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1e1c0cb509..8772319593 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -73,6 +73,7 @@ pub struct DeleteBuilder<'a, 'b> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; +/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session @@ -951,6 +952,7 @@ impl<'a> Sink for Publisher<'a> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; +/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index a2371d1bfa..ffe7036050 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -683,6 +683,7 @@ impl Session { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; + /// use zenoh::sample_builder::PutSampleBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session From 5f0b531041ace6a303533ac0fbc56227ba121617 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: 
Wed, 27 Mar 2024 12:37:22 +0100 Subject: [PATCH 056/598] clippy warning fix --- commons/zenoh-macros/build.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index 557593d00e..d3c6b4e55b 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -24,6 +24,7 @@ fn main() { let mut version_rs = OpenOptions::new() .create(true) .write(true) + .truncate(true) .open(version_rs) .unwrap(); version_rs.write_all(&output.stdout).unwrap(); From 5a9bf0aacbba65295489110f6c6d645b9c50811b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 27 Mar 2024 12:44:48 +0100 Subject: [PATCH 057/598] Payload API (#866) * Remove Deref/DerefMut to ZBuf from Payload. * Use reader in payload deserializer * Remove payload writer * Replace deserialize::() with deserialize::Cow() * Fix cargo clippy * Remove blank lifetime --- commons/zenoh-macros/build.rs | 1 + plugins/zenoh-plugin-example/src/lib.rs | 3 +- plugins/zenoh-plugin-rest/src/lib.rs | 22 +++- .../src/replica/storage.rs | 20 ++- zenoh-ext/src/group.rs | 7 +- zenoh/src/payload.rs | 121 ++++++++++-------- zenoh/tests/attachments.rs | 16 +-- 7 files changed, 108 insertions(+), 82 deletions(-) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index 557593d00e..d5ce6632dc 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -23,6 +23,7 @@ fn main() { let version_rs = std::path::PathBuf::from(env::var_os("OUT_DIR").unwrap()).join("version.rs"); let mut version_rs = OpenOptions::new() .create(true) + .truncate(true) .write(true) .open(version_rs) .unwrap(); diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 04f49b4739..ad254278e3 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -15,6 +15,7 @@ use futures::select; use log::{debug, info}; +use std::borrow::Cow; use std::collections::HashMap; use 
std::convert::TryFrom; use std::sync::{ @@ -164,7 +165,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + let payload = sample.payload().deserialize::>().unwrap_or_else(|e| Cow::from(e.to_string())); info!("Received data ('{}': '{}')", sample.key_expr(), payload); stored.insert(sample.key_expr().to_string(), sample); }, diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e2718f6579..12c0dd6405 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -70,9 +70,11 @@ fn payload_to_json(payload: &Payload, encoding: &Encoding) -> serde_json::Value match encoding { // If it is a JSON try to deserialize as json, if it fails fallback to base64 &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { - serde_json::from_slice::(&payload.contiguous()).unwrap_or( - serde_json::Value::String(StringOrBase64::from(payload).into_string()), - ) + payload + .deserialize::() + .unwrap_or_else(|_| { + serde_json::Value::String(StringOrBase64::from(payload).into_string()) + }) } // otherwise convert to JSON string _ => serde_json::Value::String(StringOrBase64::from(payload).into_string()), @@ -124,7 +126,10 @@ fn sample_to_html(sample: Sample) -> String { format!( "
{}
\n
{}
\n", sample.key_expr().as_str(), - String::from_utf8_lossy(&sample.payload().contiguous()) + sample + .payload() + .deserialize::>() + .unwrap_or_default() ) } @@ -134,7 +139,7 @@ fn result_to_html(sample: Result) -> String { Err(err) => { format!( "
ERROR
\n
{}
\n", - String::from_utf8_lossy(&err.payload.contiguous()) + err.payload.deserialize::>().unwrap_or_default() ) } } @@ -160,12 +165,15 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(sample) => response( StatusCode::Ok, Cow::from(sample.encoding()).as_ref(), - String::from_utf8_lossy(&sample.payload().contiguous()).as_ref(), + &sample + .payload() + .deserialize::>() + .unwrap_or_default(), ), Err(value) => response( StatusCode::Ok, Cow::from(&value.encoding).as_ref(), - String::from_utf8_lossy(&value.payload.contiguous()).as_ref(), + &value.payload.deserialize::>().unwrap_or_default(), ), }, Err(_) => response(StatusCode::Ok, "", ""), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 35134dfe43..108beaabb2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -665,11 +665,23 @@ impl StorageService { } fn serialize_update(update: &Update) -> String { + let Update { + kind, + data: + StoredData { + value: Value { + payload, encoding, .. 
+ }, + timestamp, + }, + } = update; + let zbuf: ZBuf = payload.into(); + let result = ( - update.kind.to_string(), - update.data.timestamp.to_string(), - update.data.value.encoding.to_string(), - update.data.value.payload.slices().collect::>(), + kind.to_string(), + timestamp.to_string(), + encoding.to_string(), + zbuf.slices().collect::>(), ); serde_json::to_string_pretty(&result).unwrap() } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 41007d8b87..ec96a8b373 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,7 @@ use std::convert::TryInto; use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; +use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -248,7 +249,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.payload().contiguous())) { + match bincode::deserialize_from::(s.payload().reader()) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -307,8 +308,8 @@ async fn net_event_handler(z: Arc, state: Arc) { while let Ok(reply) = receiver.recv_async().await { match reply.sample { Ok(sample) => { - match bincode::deserialize::( - &sample.payload().contiguous(), + match bincode::deserialize_from::( + sample.payload().reader(), ) { Ok(m) => { let mut expiry = Instant::now(); diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index aab8235249..ed2a58145c 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -15,14 +15,12 @@ //! Payload primitives. 
use crate::buffers::ZBuf; use std::{ - borrow::Cow, - convert::Infallible, - fmt::Debug, - ops::{Deref, DerefMut}, - string::FromUtf8Error, - sync::Arc, + borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, +}; +use zenoh_buffers::buffer::Buffer; +use zenoh_buffers::{ + buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZBufReader, ZSlice, }; -use zenoh_buffers::{buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_result::ZResult; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -44,19 +42,29 @@ impl Payload { { Self(t.into()) } -} -impl Deref for Payload { - type Target = ZBuf; + /// Returns wether the payload is empty or not. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns the length of the payload. + pub fn len(&self) -> usize { + self.0.len() + } - fn deref(&self) -> &Self::Target { - &self.0 + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. + pub fn reader(&self) -> PayloadReader<'_> { + PayloadReader(self.0.reader()) } } -impl DerefMut for Payload { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 +/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +pub struct PayloadReader<'a>(ZBufReader<'a>); + +impl std::io::Read for PayloadReader<'_> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + self.0.read(buf) } } @@ -81,10 +89,10 @@ impl Payload { /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. /// See [encode](Value::encode) for an example. 
- pub fn deserialize(&self) -> ZResult + pub fn deserialize<'a, T>(&'a self) -> ZResult where - ZSerde: Deserialize, - >::Error: Debug, + ZSerde: Deserialize<'a, T>, + >::Error: Debug, { let t: T = ZSerde.deserialize(self).map_err(|e| zerror!("{:?}", e))?; Ok(t) @@ -99,11 +107,11 @@ pub trait Serialize { fn serialize(self, t: T) -> Self::Output; } -pub trait Deserialize { +pub trait Deserialize<'a, T> { type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &Payload) -> Result; + fn deserialize(self, t: &'a Payload) -> Result; } /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. @@ -129,7 +137,7 @@ impl From for ZBuf { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result { @@ -159,18 +167,17 @@ impl Serialize<&[u8]> for ZSerde { } } -impl Deserialize> for ZSerde { +impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result, Self::Error> { - let v: ZBuf = v.into(); - Ok(v.contiguous().to_vec()) + Ok(Vec::from(v)) } } impl From<&Payload> for Vec { fn from(value: &Payload) -> Self { - value.contiguous().to_vec() + Cow::from(value).to_vec() } } @@ -182,18 +189,17 @@ impl<'a> Serialize> for ZSerde { } } -impl<'a> Deserialize> for ZSerde { +impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result, Self::Error> { - let v: Vec = Self.deserialize(v)?; - Ok(Cow::Owned(v)) + fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + Ok(Cow::from(v)) } } impl<'a> From<&'a Payload> for Cow<'a, [u8]> { fn from(value: &'a Payload) -> Self { - value.contiguous() + value.0.contiguous() } } @@ -214,11 +220,11 @@ impl Serialize<&str> for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, String> for 
ZSerde { type Error = FromUtf8Error; fn deserialize(self, v: &Payload) -> Result { - String::from_utf8(v.contiguous().to_vec()) + String::from_utf8(Vec::from(v)) } } @@ -246,7 +252,7 @@ impl<'a> Serialize> for ZSerde { } } -impl<'a> Deserialize> for ZSerde { +impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = FromUtf8Error; fn deserialize(self, v: &Payload) -> Result, Self::Error> { @@ -255,10 +261,10 @@ impl<'a> Deserialize> for ZSerde { } } -impl TryFrom<&Payload> for Cow<'_, str> { +impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { type Error = FromUtf8Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &'a Payload) -> Result { ZSerde.deserialize(value) } } @@ -295,16 +301,19 @@ macro_rules! impl_int { } } - impl Deserialize<$t> for ZSerde { + impl<'a> Deserialize<'a, $t> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { - let p = v.contiguous(); + use std::io::Read; + + let mut r = v.reader(); let mut bs = (0 as $t).to_le_bytes(); - if p.len() > bs.len() { + if v.len() > bs.len() { return Err(ZDeserializeError); } - bs[..p.len()].copy_from_slice(&p); + r.read_exact(&mut bs[..v.len()]) + .map_err(|_| ZDeserializeError)?; let t = <$t>::from_le_bytes(bs); Ok(t) } @@ -349,15 +358,12 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, bool> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result { - let p = v.contiguous(); - if p.len() != 1 { - return Err(ZDeserializeError); - } - match p[0] { + let p = v.deserialize::().map_err(|_| ZDeserializeError)?; + match p { 0 => Ok(false), 1 => Ok(true), _ => Err(ZDeserializeError), @@ -380,7 +386,7 @@ impl Serialize<&serde_json::Value> for ZSerde { fn serialize(self, t: &serde_json::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_json::to_writer(payload.writer(), t)?; + serde_json::to_writer(payload.0.writer(), t)?; Ok(payload) } } @@ -393,7 
+399,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_json::Value> for ZSerde { type Error = serde_json::Error; fn deserialize(self, v: &Payload) -> Result { @@ -415,7 +421,7 @@ impl Serialize<&serde_yaml::Value> for ZSerde { fn serialize(self, t: &serde_yaml::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.writer(), t)?; + serde_yaml::to_writer(payload.0.writer(), t)?; Ok(payload) } } @@ -428,7 +434,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_yaml::Value> for ZSerde { type Error = serde_yaml::Error; fn deserialize(self, v: &Payload) -> Result { @@ -450,7 +456,7 @@ impl Serialize<&serde_cbor::Value> for ZSerde { fn serialize(self, t: &serde_cbor::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_cbor::to_writer(payload.writer(), t)?; + serde_cbor::to_writer(payload.0.writer(), t)?; Ok(payload) } } @@ -463,7 +469,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_cbor::Value> for ZSerde { type Error = serde_cbor::Error; fn deserialize(self, v: &Payload) -> Result { @@ -486,7 +492,7 @@ impl Serialize<&serde_pickle::Value> for ZSerde { fn serialize(self, t: &serde_pickle::Value) -> Self::Output { let mut payload = Payload::empty(); serde_pickle::value_to_writer( - &mut payload.writer(), + &mut payload.0.writer(), t, serde_pickle::SerOptions::default(), )?; @@ -502,7 +508,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; fn deserialize(self, v: &Payload) -> Result { @@ -590,9 +596,12 @@ impl std::fmt::Display for StringOrBase64 { impl From<&Payload> for StringOrBase64 { fn from(v: &Payload) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::() { - Ok(s) => StringOrBase64::String(s), - Err(_) => 
StringOrBase64::Base64(b64_std_engine.encode(v.contiguous())), + match v.deserialize::>() { + Ok(s) => StringOrBase64::String(s.into_owned()), + Err(_) => { + let cow: Cow<'_, [u8]> = Cow::from(v); + StringOrBase64::Base64(b64_std_engine.encode(cow)) + } } } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 603939bc0e..e6a3356559 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -20,10 +20,7 @@ fn pubsub() { let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { - println!( - "{}", - std::str::from_utf8(&sample.payload().contiguous()).unwrap() - ); + println!("{}", sample.payload().deserialize::().unwrap()); for (k, v) in sample.attachment().unwrap() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } @@ -72,13 +69,10 @@ fn queries() { .callback(|query| { println!( "{}", - std::str::from_utf8( - &query - .value() - .map(|q| q.payload.contiguous()) - .unwrap_or_default() - ) - .unwrap() + query + .value() + .map(|q| q.payload.deserialize::().unwrap()) + .unwrap_or_default() ); let mut attachment = Attachment::new(); for (k, v) in query.attachment().unwrap() { From 2be4fa90ada9eff64827ef24da3ded1de919f7fc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 12:45:42 +0100 Subject: [PATCH 058/598] clippy warning fix (#867) --- commons/zenoh-macros/build.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index d5ce6632dc..be5abe870b 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -25,6 +25,7 @@ fn main() { .create(true) .truncate(true) .write(true) + .truncate(true) .open(version_rs) .unwrap(); version_rs.write_all(&output.stdout).unwrap(); From 448535495d2b901c673e9d839908f646700f9719 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 13:13:34 +0100 Subject: [PATCH 059/598] removed extra truncate appeared from different PRs --- 
commons/zenoh-macros/build.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index be5abe870b..d5ce6632dc 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -25,7 +25,6 @@ fn main() { .create(true) .truncate(true) .write(true) - .truncate(true) .open(version_rs) .unwrap(); version_rs.write_all(&output.stdout).unwrap(); From ce5b6108537599424f5ab0d6da9887b05f966e59 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 11:20:29 +0100 Subject: [PATCH 060/598] sample api for GetBuilder --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 4 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 4 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/liveliness.rs | 5 +- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/publication.rs | 38 ++++--- zenoh/src/query.rs | 100 ++++++++++++++---- zenoh/src/queryable.rs | 32 +++--- zenoh/src/sample.rs | 75 +++++++++---- zenoh/src/sample_builder.rs | 62 ++++++----- zenoh/src/session.rs | 9 +- zenoh/src/value.rs | 26 +++-- 17 files changed, 249 insertions(+), 122 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index b40afc1f53..79a1e16514 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -36,7 +36,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .express(express) + .is_express(express) .res() .unwrap(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 0003958b5d..a629cce3cf 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -35,7 +35,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) - .express(express) + 
.is_express(express) .res() .unwrap(); diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 7e7c1ac9b5..c9b9fe64f3 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -42,7 +42,7 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) - .express(args.express) + .is_express(args.express) .res() .unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 8a85f14caa..74da23679f 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,7 +34,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample_builder::PutSampleBuilderTrait; +use zenoh::sample_builder::ValueBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 691fabd7a7..e5c4840666 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::PutSampleBuilderTrait; -use zenoh::sample_builder::SampleBuilderTrait; +use zenoh::sample_builder::TimestampBuilderTrait; +use zenoh::sample_builder::ValueBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index a899196e7e..4119a941e5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::{PutSampleBuilder, PutSampleBuilderTrait, SampleBuilderTrait}; +use zenoh::sample_builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; use zenoh_core::{AsyncResolve, SyncResolve}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 04a707bfda..69c973de39 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -27,7 +27,7 @@ use zenoh::key_expr::KeyExpr; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind}; use zenoh::sample_builder::{ - DeleteSampleBuilder, PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, + DeleteSampleBuilder, PutSampleBuilder, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait, }; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; @@ -719,7 +719,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).with_encoding(result.2); + let value = Value::new(payload).with_encoding(result.2.into()); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 5c302840b8..e6b269cfbd 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, 
QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{SampleBuilder, TimestampBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 425aa62592..6aac3d3908 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -15,6 +15,8 @@ //! Liveliness primitives. //! //! see [`Liveliness`] +use zenoh_protocol::network::request; + use crate::{query::Reply, Id}; #[zenoh_macros::unstable] @@ -740,18 +742,19 @@ where { fn res_sync(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); - self.session .query( &self.key_expr?.into(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), QueryTarget::DEFAULT, QueryConsolidation::DEFAULT, + request::ext::QoSType::REQUEST.into(), Locality::default(), self.timeout, None, #[cfg(feature = "unstable")] None, + SourceInfo::empty(), callback, ) .map(|_| receiver) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 2a2b318cde..9047e8b112 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,7 +20,7 @@ use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample_builder::PutSampleBuilderTrait; +use crate::sample_builder::ValueBuilderTrait; use crate::value::Value; use async_std::task; use log::{error, trace}; @@ -426,7 +426,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding)), + .map(|b| Value::from(b.payload).with_encoding(b.encoding.into())), qid: msg.id, zid, primitives, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 8772319593..81a12133ed 100644 --- a/zenoh/src/publication.rs +++ 
b/zenoh/src/publication.rs @@ -19,7 +19,7 @@ use crate::prelude::*; use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::sample_builder::{ - DeleteSampleBuilderTrait, PutSampleBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; use crate::SessionRef; use crate::Undeclarable; @@ -114,9 +114,9 @@ impl QoSBuilderTrait for PutBuilder<'_, '_> { } } #[inline] - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.express(is_express), + publisher: self.publisher.is_express(is_express), ..self } } @@ -138,15 +138,15 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } } #[inline] - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.express(is_express), + publisher: self.publisher.is_express(is_express), ..self } } } -impl SampleBuilderTrait for PutBuilder<'_, '_> { +impl TimestampBuilderTrait for PutBuilder<'_, '_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -156,6 +156,9 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { ..self } } +} + +impl SampleBuilderTrait for PutBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -176,7 +179,7 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { } } -impl SampleBuilderTrait for DeleteBuilder<'_, '_> { +impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -186,6 +189,9 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { ..self } } +} + +impl SampleBuilderTrait for DeleteBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -206,7 +212,7 @@ impl SampleBuilderTrait for 
DeleteBuilder<'_, '_> { } } -impl PutSampleBuilderTrait for PutBuilder<'_, '_> { +impl ValueBuilderTrait for PutBuilder<'_, '_> { fn with_encoding(self, encoding: Encoding) -> Self { Self { encoding, ..self } } @@ -222,8 +228,6 @@ impl PutSampleBuilderTrait for PutBuilder<'_, '_> { } } -impl DeleteSampleBuilderTrait for DeleteBuilder<'_, '_> {} - impl PutBuilder<'_, '_> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). @@ -761,7 +765,7 @@ pub struct DeletePublication<'a> { pub(crate) attachment: Option, } -impl SampleBuilderTrait for PutPublication<'_> { +impl TimestampBuilderTrait for PutPublication<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -772,7 +776,9 @@ impl SampleBuilderTrait for PutPublication<'_> { ..self } } +} +impl SampleBuilderTrait for PutPublication<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -795,7 +801,7 @@ impl SampleBuilderTrait for PutPublication<'_> { } } -impl PutSampleBuilderTrait for PutPublication<'_> { +impl ValueBuilderTrait for PutPublication<'_> { fn with_encoding(self, encoding: Encoding) -> Self { Self { encoding, ..self } } @@ -811,7 +817,7 @@ impl PutSampleBuilderTrait for PutPublication<'_> { } } -impl SampleBuilderTrait for DeletePublication<'_> { +impl TimestampBuilderTrait for DeletePublication<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -822,7 +828,9 @@ impl SampleBuilderTrait for DeletePublication<'_> { ..self } } +} +impl SampleBuilderTrait for DeletePublication<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -845,8 +853,6 @@ impl SampleBuilderTrait for DeletePublication<'_> { } } -impl DeleteSampleBuilderTrait for DeletePublication<'_> {} - impl Resolvable for PutPublication<'_> { type To = ZResult<()>; } 
@@ -1010,7 +1016,7 @@ impl QoSBuilderTrait for PublisherBuilder<'_, '_> { /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. #[inline] - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { is_express, ..self } } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index fe48748ad4..6a0c4b1933 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -17,6 +17,8 @@ use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; +use crate::sample::QoSBuilder; +use crate::sample_builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; use crate::Session; use std::collections::HashMap; use std::future::Ready; @@ -120,12 +122,70 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) scope: ZResult>>, pub(crate) target: QueryTarget, pub(crate) consolidation: QueryConsolidation, + pub(crate) qos: QoSBuilder, pub(crate) destination: Locality, pub(crate) timeout: Duration, pub(crate) handler: Handler, pub(crate) value: Option, #[cfg(feature = "unstable")] pub(crate) attachment: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, +} + +impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let qos = self.qos.congestion_control(congestion_control); + Self 
{ qos, ..self } + } + + fn priority(self, priority: Priority) -> Self { + let qos = self.qos.priority(priority); + Self { qos, ..self } + } + + fn is_express(self, is_express: bool) -> Self { + let qos = self.qos.is_express(is_express); + Self { qos, ..self } + } +} + +impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { + fn with_encoding(self, encoding: Encoding) -> Self { + let value = Some(self.value.unwrap_or_default().with_encoding(encoding)); + Self { value, ..self } + } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + let value = Some(self.value.unwrap_or_default().with_payload(payload)); + Self { value, ..self } + } } impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { @@ -156,11 +216,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler: _, } = self; GetBuilder { @@ -169,11 +232,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler: callback, } } @@ -239,11 +305,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler: _, } = self; GetBuilder { @@ -252,11 +321,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler, } } @@ -315,29 +387,11 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { /// expressions that don't intersect with the query's. 
#[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { - let Self { - session, - selector, - scope, - target, - consolidation, - destination, - timeout, - value, - attachment, - handler, - } = self; Self { - session, - selector: selector.and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), - scope, - target, - consolidation, - destination, - timeout, - value, - attachment, - handler, + selector: self + .selector + .and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), + ..self } } } @@ -382,11 +436,13 @@ where &self.scope?, self.target, self.consolidation, + self.qos.into(), self.destination, self.timeout, self.value, #[cfg(feature = "unstable")] self.attachment, + self.source_info, callback, ) .map(|_| receiver) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 14e9d09068..a9b469a340 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -20,8 +20,8 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::SourceInfo; use crate::sample_builder::{ - DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, + DeleteSampleBuilder, PutSampleBuilder, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, + TimestampBuilderTrait, ValueBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -238,7 +238,7 @@ impl<'a> ReplySampleBuilder<'a> { } } -impl SampleBuilderTrait for ReplySampleBuilder<'_> { +impl TimestampBuilderTrait for ReplySampleBuilder<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -252,7 +252,9 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { ..self } } +} +impl SampleBuilderTrait for ReplySampleBuilder<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -293,9 +295,9 @@ impl QoSBuilderTrait for 
ReplySampleBuilder<'_> { } } - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + sample_builder: self.sample_builder.is_express(is_express), ..self } } @@ -328,7 +330,7 @@ pub struct ReplyBuilder<'a> { sample_builder: PutSampleBuilder, } -impl SampleBuilderTrait for ReplyBuilder<'_> { +impl TimestampBuilderTrait for ReplyBuilder<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -342,7 +344,9 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { ..self } } +} +impl SampleBuilderTrait for ReplyBuilder<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -383,15 +387,15 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } } - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + sample_builder: self.sample_builder.is_express(is_express), ..self } } } -impl PutSampleBuilderTrait for ReplyBuilder<'_> { +impl ValueBuilderTrait for ReplyBuilder<'_> { fn with_encoding(self, encoding: Encoding) -> Self { Self { sample_builder: self.sample_builder.with_encoding(encoding), @@ -418,7 +422,7 @@ pub struct ReplyDelBuilder<'a> { sample_builder: DeleteSampleBuilder, } -impl SampleBuilderTrait for ReplyDelBuilder<'_> { +impl TimestampBuilderTrait for ReplyDelBuilder<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -432,7 +436,9 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { ..self } } +} +impl SampleBuilderTrait for ReplyDelBuilder<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -473,16 +479,14 @@ impl QoSBuilderTrait for ReplyDelBuilder<'_> { } } - fn express(self, is_express: 
bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + sample_builder: self.sample_builder.is_express(is_express), ..self } } } -impl DeleteSampleBuilderTrait for ReplyDelBuilder<'_> {} - /// A builder returned by [`Query::reply_err()`](Query::reply_err). #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 1998f3e844..2dbeebe717 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -16,13 +16,16 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; +use crate::sample_builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType, zenoh}; +use zenoh_protocol::network::declare::ext::QoSType; +use zenoh_protocol::{core::CongestionControl, zenoh}; pub type SourceSn = u64; @@ -566,6 +569,58 @@ pub struct QoS { inner: QoSType, } +#[derive(Debug)] +pub struct QoSBuilder(QoS); + +impl From for QoSBuilder { + fn from(qos: QoS) -> Self { + QoSBuilder(qos) + } +} + +impl From for QoS { + fn from(builder: QoSBuilder) -> Self { + builder.0 + } +} + +impl Resolvable for QoSBuilder { + type To = QoS; +} + +impl SyncResolve for QoSBuilder { + fn res_sync(self) -> ::To { + self.0 + } +} + +impl AsyncResolve for QoSBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + futures::future::ready(self.0) + } +} + +impl QoSBuilderTrait for QoSBuilder { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let mut inner = self.0.inner; + 
inner.set_congestion_control(congestion_control); + Self(QoS { inner }) + } + + fn priority(self, priority: Priority) -> Self { + let mut inner = self.0.inner; + inner.set_priority(priority.into()); + Self(QoS { inner }) + } + + fn is_express(self, is_express: bool) -> Self { + let mut inner = self.0.inner; + inner.set_is_express(is_express); + Self(QoS { inner }) + } +} + impl QoS { /// Gets priority of the message. pub fn priority(&self) -> Priority { @@ -590,24 +645,6 @@ impl QoS { pub fn express(&self) -> bool { self.inner.is_express() } - - /// Sets priority value. - pub fn with_priority(mut self, priority: Priority) -> Self { - self.inner.set_priority(priority.into()); - self - } - - /// Sets congestion control value. - pub fn with_congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.inner.set_congestion_control(congestion_control); - self - } - - /// Sets express flag vlaue. - pub fn with_express(mut self, is_express: bool) -> Self { - self.inner.set_is_express(is_express); - self - } } impl From for QoS { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 7e38e84afd..5aca7ff1da 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -14,6 +14,7 @@ use crate::sample::Attachment; use crate::sample::QoS; +use crate::sample::QoSBuilder; use crate::sample::SourceInfo; use crate::Encoding; use crate::KeyExpr; @@ -36,14 +37,17 @@ pub trait QoSBuilderTrait { /// Change the `express` policy to apply when routing the data. /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. 
- fn express(self, is_express: bool) -> Self; + fn is_express(self, is_express: bool) -> Self; } -pub trait SampleBuilderTrait { +pub trait TimestampBuilderTrait { /// Sets of clears timestamp fn with_timestamp_opt(self, timestamp: Option) -> Self; /// Sets timestamp fn with_timestamp(self, timestamp: Timestamp) -> Self; +} + +pub trait SampleBuilderTrait { /// Attach source information #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self; @@ -55,16 +59,15 @@ pub trait SampleBuilderTrait { fn with_attachment(self, attachment: Attachment) -> Self; } -pub trait PutSampleBuilderTrait: SampleBuilderTrait { +pub trait ValueBuilderTrait { /// Set the [`Encoding`] fn with_encoding(self, encoding: Encoding) -> Self; + /// Sets the payload fn with_payload(self, payload: IntoPayload) -> Self where IntoPayload: Into; } -pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} - #[derive(Debug)] pub struct SampleBuilder(Sample); @@ -98,7 +101,7 @@ impl SampleBuilder { } } -impl SampleBuilderTrait for SampleBuilder { +impl TimestampBuilderTrait for SampleBuilder { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(Sample { timestamp, @@ -109,7 +112,9 @@ impl SampleBuilderTrait for SampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { self.with_timestamp_opt(Some(timestamp)) } +} +impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(Sample { @@ -134,22 +139,19 @@ impl SampleBuilderTrait for SampleBuilder { impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self(Sample { - qos: self.0.qos.with_congestion_control(congestion_control), - ..self.0 - }) + let qos: QoSBuilder = self.0.qos.into(); + let qos = qos.congestion_control(congestion_control).res_sync(); + Self(Sample { qos, ..self.0 }) } fn priority(self, priority: Priority) -> Self { - Self(Sample { - qos: 
self.0.qos.with_priority(priority), - ..self.0 - }) + let qos: QoSBuilder = self.0.qos.into(); + let qos = qos.priority(priority).res_sync(); + Self(Sample { qos, ..self.0 }) } - fn express(self, is_express: bool) -> Self { - Self(Sample { - qos: self.0.qos.with_express(is_express), - ..self.0 - }) + fn is_express(self, is_express: bool) -> Self { + let qos: QoSBuilder = self.0.qos.into(); + let qos = qos.is_express(is_express).res_sync(); + Self(Sample { qos, ..self.0 }) } } @@ -197,13 +199,16 @@ impl PutSampleBuilder { } } -impl SampleBuilderTrait for PutSampleBuilder { +impl TimestampBuilderTrait for PutSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(self.0.with_timestamp_opt(timestamp)) } +} + +impl SampleBuilderTrait for PutSampleBuilder { #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -225,12 +230,12 @@ impl QoSBuilderTrait for PutSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + fn is_express(self, is_express: bool) -> Self { + Self(self.0.is_express(is_express)) } } -impl PutSampleBuilderTrait for PutSampleBuilder { +impl ValueBuilderTrait for PutSampleBuilder { fn with_encoding(self, encoding: Encoding) -> Self { Self(SampleBuilder(Sample { encoding, @@ -291,13 +296,16 @@ impl DeleteSampleBuilder { } } -impl SampleBuilderTrait for DeleteSampleBuilder { +impl TimestampBuilderTrait for DeleteSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(self.0.with_timestamp_opt(timestamp)) } +} + +impl SampleBuilderTrait for DeleteSampleBuilder { #[zenoh_macros::unstable] fn with_source_info(self, 
source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -319,13 +327,11 @@ impl QoSBuilderTrait for DeleteSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + fn is_express(self, is_express: bool) -> Self { + Self(self.0.is_express(is_express)) } } -impl DeleteSampleBuilderTrait for DeleteSampleBuilder {} - impl From for SampleBuilder { fn from(sample: Sample) -> Self { SampleBuilder(sample) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ffe7036050..5b80adb0e5 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -784,18 +784,21 @@ impl Session { let conf = self.runtime.config().lock(); Duration::from_millis(unwrap_or_default!(conf.queries_default_timeout())) }; + let qos: QoS = request::ext::QoSType::REQUEST.into(); GetBuilder { session: self, selector, scope: Ok(None), target: QueryTarget::DEFAULT, consolidation: QueryConsolidation::DEFAULT, + qos: qos.into(), destination: Locality::default(), timeout, value: None, #[cfg(feature = "unstable")] attachment: None, handler: DefaultHandler, + source_info: SourceInfo::empty(), } } } @@ -1567,10 +1570,12 @@ impl Session { scope: &Option>, target: QueryTarget, consolidation: QueryConsolidation, + qos: QoS, destination: Locality, timeout: Duration, value: Option, #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); @@ -1649,7 +1654,7 @@ impl Session { primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), - ext_qos: request::ext::QoSType::REQUEST, + ext_qos: qos.into(), ext_tstamp: None, ext_nodeid: request::ext::NodeIdType::DEFAULT, ext_target: target, @@ -1658,7 +1663,7 @@ impl Session { payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, 
parameters: selector.parameters().to_string(), - ext_sinfo: None, + ext_sinfo: source.into(), ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 128f0ff605..2d98cbf398 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,7 +13,7 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload}; +use crate::{encoding::Encoding, payload::Payload, sample_builder::ValueBuilderTrait}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] @@ -36,7 +36,6 @@ impl Value { encoding: Encoding::default(), } } - /// Creates an empty [`Value`]. pub const fn empty() -> Self { Value { @@ -44,15 +43,20 @@ impl Value { encoding: Encoding::default(), } } +} - /// Sets the encoding of this [`Value`]`. - #[inline(always)] - pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self +impl ValueBuilderTrait for Value { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { encoding, ..self } + } + fn with_payload(self, payload: IntoPayload) -> Self where - IntoEncoding: Into, + IntoPayload: Into, { - self.encoding = encoding.into(); - self + Self { + payload: payload.into(), + ..self + } } } @@ -67,3 +71,9 @@ where } } } + +impl Default for Value { + fn default() -> Self { + Value::empty() + } +} From 0bce160e13947dacb6ed110b571578a93b70b8ae Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 11:23:52 +0100 Subject: [PATCH 061/598] restored "express" name --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- zenoh/src/publication.rs | 10 +++++----- zenoh/src/query.rs | 4 ++-- zenoh/src/queryable.rs | 12 ++++++------ zenoh/src/sample.rs | 2 +- zenoh/src/sample_builder.rs | 14 +++++++------- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/examples/examples/z_ping.rs 
b/examples/examples/z_ping.rs index 79a1e16514..b40afc1f53 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -36,7 +36,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .is_express(express) + .express(express) .res() .unwrap(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index a629cce3cf..0003958b5d 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -35,7 +35,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) - .is_express(express) + .express(express) .res() .unwrap(); diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index c9b9fe64f3..7e7c1ac9b5 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -42,7 +42,7 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) - .is_express(args.express) + .express(args.express) .res() .unwrap(); diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 81a12133ed..e60e40d295 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -114,9 +114,9 @@ impl QoSBuilderTrait for PutBuilder<'_, '_> { } } #[inline] - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.is_express(is_express), + publisher: self.publisher.express(is_express), ..self } } @@ -138,9 +138,9 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } } #[inline] - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.is_express(is_express), + publisher: self.publisher.express(is_express), ..self } } @@ -1016,7 +1016,7 @@ impl QoSBuilderTrait for PublisherBuilder<'_, '_> { /// When express is set to `true`, then the message will not be batched. 
/// This usually has a positive impact on latency but negative impact on throughput. #[inline] - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { is_express, ..self } } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 6a0c4b1933..db17715a89 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -167,8 +167,8 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { Self { qos, ..self } } - fn is_express(self, is_express: bool) -> Self { - let qos = self.qos.is_express(is_express); + fn express(self, is_express: bool) -> Self { + let qos = self.qos.express(is_express); Self { qos, ..self } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a9b469a340..d9327415f5 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -295,9 +295,9 @@ impl QoSBuilderTrait for ReplySampleBuilder<'_> { } } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.is_express(is_express), + sample_builder: self.sample_builder.express(is_express), ..self } } @@ -387,9 +387,9 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.is_express(is_express), + sample_builder: self.sample_builder.express(is_express), ..self } } @@ -479,9 +479,9 @@ impl QoSBuilderTrait for ReplyDelBuilder<'_> { } } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.is_express(is_express), + sample_builder: self.sample_builder.express(is_express), ..self } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 2dbeebe717..d774e5e007 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -614,7 +614,7 @@ impl QoSBuilderTrait for QoSBuilder { Self(QoS { inner }) } - fn is_express(self, 
is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { let mut inner = self.0.inner; inner.set_is_express(is_express); Self(QoS { inner }) diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 5aca7ff1da..b13bfce346 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -37,7 +37,7 @@ pub trait QoSBuilderTrait { /// Change the `express` policy to apply when routing the data. /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. - fn is_express(self, is_express: bool) -> Self; + fn express(self, is_express: bool) -> Self; } pub trait TimestampBuilderTrait { @@ -148,9 +148,9 @@ impl QoSBuilderTrait for SampleBuilder { let qos = qos.priority(priority).res_sync(); Self(Sample { qos, ..self.0 }) } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.is_express(is_express).res_sync(); + let qos = qos.express(is_express).res_sync(); Self(Sample { qos, ..self.0 }) } } @@ -230,8 +230,8 @@ impl QoSBuilderTrait for PutSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn is_express(self, is_express: bool) -> Self { - Self(self.0.is_express(is_express)) + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) } } @@ -327,8 +327,8 @@ impl QoSBuilderTrait for DeleteSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn is_express(self, is_express: bool) -> Self { - Self(self.0.is_express(is_express)) + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) } } From 3620c3a7d057c312ff8354bffef40f79424aee80 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 11:32:11 +0100 Subject: [PATCH 062/598] removed 'timestamp_opt' --- zenoh/src/publication.rs | 34 
++++------------------------------ zenoh/src/queryable.rs | 27 +++------------------------ zenoh/src/sample_builder.rs | 20 ++++---------------- 3 files changed, 11 insertions(+), 70 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index e60e40d295..f8a42077b9 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -147,15 +147,9 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } impl TimestampBuilderTrait for PutBuilder<'_, '_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for PutBuilder<'_, '_> { @@ -180,15 +174,9 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { } impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for DeleteBuilder<'_, '_> { @@ -766,16 +754,9 @@ pub struct DeletePublication<'a> { } impl TimestampBuilderTrait for PutPublication<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for PutPublication<'_> { @@ -818,16 +799,9 @@ impl ValueBuilderTrait for PutPublication<'_> { } impl TimestampBuilderTrait for DeletePublication<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self 
{ - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for DeletePublication<'_> { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d9327415f5..625ae6f25f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -239,14 +239,7 @@ impl<'a> ReplySampleBuilder<'a> { } impl TimestampBuilderTrait for ReplySampleBuilder<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self { - sample_builder: self.sample_builder.with_timestamp_opt(timestamp), - ..self - } - } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), ..self @@ -331,14 +324,7 @@ pub struct ReplyBuilder<'a> { } impl TimestampBuilderTrait for ReplyBuilder<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self { - sample_builder: self.sample_builder.with_timestamp_opt(timestamp), - ..self - } - } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), ..self @@ -423,14 +409,7 @@ pub struct ReplyDelBuilder<'a> { } impl TimestampBuilderTrait for ReplyDelBuilder<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self { - sample_builder: self.sample_builder.with_timestamp_opt(timestamp), - ..self - } - } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), ..self diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index b13bfce346..990586ca0f 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -42,9 +42,7 @@ pub trait QoSBuilderTrait { pub trait TimestampBuilderTrait { /// Sets of clears timestamp - fn with_timestamp_opt(self, timestamp: Option) -> Self; - /// Sets timestamp - fn 
with_timestamp(self, timestamp: Timestamp) -> Self; + fn with_timestamp(self, timestamp: Option) -> Self; } pub trait SampleBuilderTrait { @@ -102,16 +100,12 @@ impl SampleBuilder { } impl TimestampBuilderTrait for SampleBuilder { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self(Sample { timestamp, ..self.0 }) } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { - self.with_timestamp_opt(Some(timestamp)) - } } impl SampleBuilderTrait for SampleBuilder { @@ -200,12 +194,9 @@ impl PutSampleBuilder { } impl TimestampBuilderTrait for PutSampleBuilder { - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self(self.0.with_timestamp(timestamp)) } - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp_opt(timestamp)) - } } impl SampleBuilderTrait for PutSampleBuilder { @@ -297,12 +288,9 @@ impl DeleteSampleBuilder { } impl TimestampBuilderTrait for DeleteSampleBuilder { - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self(self.0.with_timestamp(timestamp)) } - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp_opt(timestamp)) - } } impl SampleBuilderTrait for DeleteSampleBuilder { From aafd2a4761b8b4df5089d19ef74f71bfe28aa644 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:10:32 +0100 Subject: [PATCH 063/598] with removed, into> added --- examples/examples/z_pub.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 10 +-- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/publication.rs | 70 +++++++++---------- zenoh/src/query.rs | 11 +-- zenoh/src/queryable.rs | 60 +++++----------- zenoh/src/sample.rs | 11 +++ zenoh/src/sample_builder.rs | 58 ++++++--------- zenoh/tests/attachments.rs | 10 +-- 11 files changed, 99 
insertions(+), 139 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index d22d4d55ee..416ff31f46 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -39,7 +39,7 @@ async fn main() { println!("Putting Data ('{}': '{}')...", &key_expr, buf); let mut put = publisher.put(buf); if let Some(attachment) = &attachment { - put = put.with_attachment( + put = put.attachment( attachment .split('&') .map(|pair| split_once(pair, '=')) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index e5c4840666..973fb89abe 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -130,7 +130,7 @@ impl AlignQueryable { query .reply(k, v.payload) .with_encoding(v.encoding) - .with_timestamp(ts) + .timestamp(ts) .res() .await .unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 4119a941e5..9d5257e53f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -112,7 +112,7 @@ impl Aligner { } = value; let sample = PutSampleBuilder::new(key, payload) .with_encoding(encoding) - .with_timestamp(ts) + .timestamp(ts) .res_sync(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 69c973de39..de76ade51d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -239,7 +239,7 @@ impl StorageService { } }; let timestamp = 
sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).with_timestamp(timestamp).res_sync(); + let sample = SampleBuilder::from(sample).timestamp(timestamp).res_sync(); self.process_sample(sample).await; }, // on query on key_expr @@ -316,14 +316,14 @@ impl StorageService { } = data.value; PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) - .with_timestamp(data.timestamp) + .timestamp(data.timestamp) .res_sync() } Some(Update { kind: SampleKind::Delete, data, }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) - .with_timestamp(data.timestamp) + .timestamp(data.timestamp) .res_sync(), None => SampleBuilder::from(sample.clone()) .keyexpr(k.clone()) @@ -533,7 +533,7 @@ impl StorageService { if let Err(e) = q .reply(key.clone(), payload) .with_encoding(encoding) - .with_timestamp(entry.timestamp) + .timestamp(entry.timestamp) .res_async() .await { @@ -568,7 +568,7 @@ impl StorageService { if let Err(e) = q .reply(q.key_expr().clone(), payload) .with_encoding(encoding) - .with_timestamp(entry.timestamp) + .timestamp(entry.timestamp) .res_async() .await { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e6b269cfbd..52a4263396 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -665,7 +665,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. 
let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let s = SampleBuilder::from(s).with_timestamp(timestamp).res_sync(); + let s = SampleBuilder::from(s).timestamp(timestamp).res_sync(); state.merge_queue.push(s); } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f8a42077b9..cd68530bf7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -147,54 +147,52 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } impl TimestampBuilderTrait for PutBuilder<'_, '_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for PutBuilder<'_, '_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self } } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } } impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for DeleteBuilder<'_, '_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self } } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn 
attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } @@ -754,14 +752,17 @@ pub struct DeletePublication<'a> { } impl TimestampBuilderTrait for PutPublication<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for PutPublication<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self @@ -769,14 +770,9 @@ impl SampleBuilderTrait for PutPublication<'_> { } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } @@ -799,14 +795,17 @@ impl ValueBuilderTrait for PutPublication<'_> { } impl TimestampBuilderTrait for DeletePublication<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for DeletePublication<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self @@ -814,14 +813,9 @@ impl SampleBuilderTrait for DeletePublication<'_> { } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), 
+ attachment: attachment.into(), ..self } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index db17715a89..2d4e5e1ee3 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -135,7 +135,7 @@ pub struct GetBuilder<'a, 'b, Handler> { impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self @@ -143,14 +143,9 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 625ae6f25f..66cb34459b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -239,9 +239,9 @@ impl<'a> ReplySampleBuilder<'a> { } impl TimestampBuilderTrait for ReplySampleBuilder<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self { - sample_builder: self.sample_builder.with_timestamp(timestamp), + sample_builder: self.sample_builder.timestamp(timestamp), ..self } } @@ -249,25 +249,17 @@ impl TimestampBuilderTrait for ReplySampleBuilder<'_> { impl SampleBuilderTrait for ReplySampleBuilder<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.with_source_info(source_info), + sample_builder: self.sample_builder.source_info(source_info), ..self } } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { 
- sample_builder: self.sample_builder.with_attachment_opt(attachment), - ..self - } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { - Self { - sample_builder: self.sample_builder.with_attachment(attachment), + sample_builder: self.sample_builder.attachment(attachment), ..self } } @@ -324,9 +316,9 @@ pub struct ReplyBuilder<'a> { } impl TimestampBuilderTrait for ReplyBuilder<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self { - sample_builder: self.sample_builder.with_timestamp(timestamp), + sample_builder: self.sample_builder.timestamp(timestamp), ..self } } @@ -334,25 +326,17 @@ impl TimestampBuilderTrait for ReplyBuilder<'_> { impl SampleBuilderTrait for ReplyBuilder<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.with_source_info(source_info), - ..self - } - } - - #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.with_attachment_opt(attachment), + sample_builder: self.sample_builder.source_info(source_info), ..self } } #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - sample_builder: self.sample_builder.with_attachment(attachment), + sample_builder: self.sample_builder.attachment(attachment), ..self } } @@ -409,9 +393,9 @@ pub struct ReplyDelBuilder<'a> { } impl TimestampBuilderTrait for ReplyDelBuilder<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self { - sample_builder: self.sample_builder.with_timestamp(timestamp), + sample_builder: self.sample_builder.timestamp(timestamp), ..self } } @@ -419,25 +403,17 @@ impl TimestampBuilderTrait for 
ReplyDelBuilder<'_> { impl SampleBuilderTrait for ReplyDelBuilder<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.with_source_info(source_info), - ..self - } - } - - #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.with_attachment_opt(attachment), + sample_builder: self.sample_builder.source_info(source_info), ..self } } #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - sample_builder: self.sample_builder.with_attachment(attachment), + sample_builder: self.sample_builder.attachment(attachment), ..self } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index d774e5e007..163ae2090a 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -263,6 +263,17 @@ mod attachment { } } } + #[zenoh_macros::unstable] + impl From for Option { + fn from(value: AttachmentBuilder) -> Self { + if value.inner.is_empty() { + None + } else { + Some(value.into()) + } + } + } + #[zenoh_macros::unstable] #[derive(Clone)] pub struct Attachment { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 990586ca0f..2d7277506d 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -41,20 +41,17 @@ pub trait QoSBuilderTrait { } pub trait TimestampBuilderTrait { - /// Sets of clears timestamp - fn with_timestamp(self, timestamp: Option) -> Self; + /// Sets of clears timestamp + fn timestamp>>(self, timestamp: T) -> Self; } pub trait SampleBuilderTrait { /// Attach source information #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self; - /// Attach or remove user-provided data in key-value format - #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) 
-> Self; + fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self; + fn attachment>>(self, attachment: T) -> Self; } pub trait ValueBuilderTrait { @@ -100,9 +97,9 @@ impl SampleBuilder { } impl TimestampBuilderTrait for SampleBuilder { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self(Sample { - timestamp, + timestamp: timestamp.into(), ..self.0 }) } @@ -110,7 +107,7 @@ impl TimestampBuilderTrait for SampleBuilder { impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self(Sample { source_info, ..self.0 @@ -118,17 +115,12 @@ impl SampleBuilderTrait for SampleBuilder { } #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self(Sample { - attachment, + attachment: attachment.into(), ..self.0 }) } - - #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { - self.with_attachment_opt(Some(attachment)) - } } impl QoSBuilderTrait for SampleBuilder { @@ -194,23 +186,19 @@ impl PutSampleBuilder { } impl TimestampBuilderTrait for PutSampleBuilder { - fn with_timestamp(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp(timestamp)) + fn timestamp>>(self, timestamp: T) -> Self { + Self(self.0.timestamp(timestamp)) } } impl SampleBuilderTrait for PutSampleBuilder { #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.with_source_info(source_info)) + fn source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.source_info(source_info)) } #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { - Self(self.0.with_attachment(attachment)) - } - 
#[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self(self.0.with_attachment_opt(attachment)) + fn attachment>>(self, attachment: T) -> Self { + Self(self.0.attachment(attachment)) } } @@ -288,23 +276,19 @@ impl DeleteSampleBuilder { } impl TimestampBuilderTrait for DeleteSampleBuilder { - fn with_timestamp(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp(timestamp)) + fn timestamp>>(self, timestamp: T) -> Self { + Self(self.0.timestamp(timestamp)) } } impl SampleBuilderTrait for DeleteSampleBuilder { #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.with_source_info(source_info)) - } - #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { - Self(self.0.with_attachment(attachment)) + fn source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.source_info(source_info)) } #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self(self.0.with_attachment_opt(attachment)) + fn attachment>>(self, attachment: T) -> Self { + Self(self.0.attachment(attachment)) } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index ba4c8a7d7c..e87fc5243b 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -38,22 +38,22 @@ fn pubsub() { } zenoh .put("test/attachment", "put") - .with_attachment( + .attachment(Some( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - ) + )) .res() .unwrap(); publisher .put("publisher") - .with_attachment( + .attachment(Some( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - ) + )) .res() .unwrap(); } @@ -84,7 +84,7 @@ fn queries() { query.key_expr().clone(), query.value().unwrap().payload.clone(), ) - .with_attachment(attachment) + .attachment(attachment) .res() .unwrap(); }) From fb6509df61afccf4cd983e460553e9f07ce77d25 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 
13:21:24 +0100 Subject: [PATCH 064/598] into to encoding returned --- .../src/replica/storage.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/publication.rs | 14 ++++++++++---- zenoh/src/query.rs | 7 ++----- zenoh/src/queryable.rs | 7 ++----- zenoh/src/sample_builder.rs | 17 ++++++----------- zenoh/src/value.rs | 12 ++++++------ 7 files changed, 28 insertions(+), 33 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index de76ade51d..6d31c9710a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -719,7 +719,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).with_encoding(result.2.into()); + let value = Value::new(payload).with_encoding(result.2); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 9047e8b112..caeeb5c89b 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -426,7 +426,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding.into())), + .map(|b| Value::from(b.payload).with_encoding(b.encoding)), qid: msg.id, zid, primitives, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index cd68530bf7..0e93350222 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -199,8 +199,11 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } impl ValueBuilderTrait for PutBuilder<'_, '_> { - fn with_encoding(self, encoding: Encoding) -> Self { - Self { encoding, ..self } + fn with_encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } 
} fn with_payload(self, payload: IntoPayload) -> Self @@ -779,8 +782,11 @@ impl SampleBuilderTrait for PutPublication<'_> { } impl ValueBuilderTrait for PutPublication<'_> { - fn with_encoding(self, encoding: Encoding) -> Self { - Self { encoding, ..self } + fn with_encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } } fn with_payload(self, payload: IntoPayload) -> Self diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 2d4e5e1ee3..05f9a3557f 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -169,15 +169,12 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { } impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { - fn with_encoding(self, encoding: Encoding) -> Self { + fn with_encoding>(self, encoding: T) -> Self { let value = Some(self.value.unwrap_or_default().with_encoding(encoding)); Self { value, ..self } } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { let value = Some(self.value.unwrap_or_default().with_payload(payload)); Self { value, ..self } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 66cb34459b..4f478e1ce7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -366,17 +366,14 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } impl ValueBuilderTrait for ReplyBuilder<'_> { - fn with_encoding(self, encoding: Encoding) -> Self { + fn with_encoding>(self, encoding: T) -> Self { Self { sample_builder: self.sample_builder.with_encoding(encoding), ..self } } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { Self { sample_builder: self.sample_builder.with_payload(payload), ..self diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 2d7277506d..a113a9c953 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -41,7 +41,7 @@ pub trait 
QoSBuilderTrait { } pub trait TimestampBuilderTrait { - /// Sets of clears timestamp + /// Sets of clears timestamp fn timestamp>>(self, timestamp: T) -> Self; } @@ -56,11 +56,9 @@ pub trait SampleBuilderTrait { pub trait ValueBuilderTrait { /// Set the [`Encoding`] - fn with_encoding(self, encoding: Encoding) -> Self; + fn with_encoding>(self, encoding: T) -> Self; /// Sets the payload - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into; + fn with_payload>(self, payload: T) -> Self; } #[derive(Debug)] @@ -215,16 +213,13 @@ impl QoSBuilderTrait for PutSampleBuilder { } impl ValueBuilderTrait for PutSampleBuilder { - fn with_encoding(self, encoding: Encoding) -> Self { + fn with_encoding>(self, encoding: T) -> Self { Self(SampleBuilder(Sample { - encoding, + encoding: encoding.into(), ..self.0 .0 })) } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { Self(SampleBuilder(Sample { payload: payload.into(), ..self.0 .0 diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 2d98cbf398..2e288c64ad 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -46,13 +46,13 @@ impl Value { } impl ValueBuilderTrait for Value { - fn with_encoding(self, encoding: Encoding) -> Self { - Self { encoding, ..self } + fn with_encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { Self { payload: payload.into(), ..self From 2ff6bc22f79f5ab373e1073ae5be1744b646ab49 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:23:17 +0100 Subject: [PATCH 065/598] example build fix --- examples/examples/z_pub.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 416ff31f46..7166981e72 100644 --- 
a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -39,12 +39,12 @@ async fn main() { println!("Putting Data ('{}': '{}')...", &key_expr, buf); let mut put = publisher.put(buf); if let Some(attachment) = &attachment { - put = put.attachment( + put = put.attachment(Some( attachment .split('&') .map(|pair| split_once(pair, '=')) .collect(), - ) + )) } put.res().await.unwrap(); } From 5bbef9c7d4643259a23cde58a20ea08f4a8a464f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:24:50 +0100 Subject: [PATCH 066/598] with removed --- plugins/zenoh-plugin-rest/src/lib.rs | 10 ++-------- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 10 +++++----- zenoh/src/net/runtime/adminspace.rs | 4 ++-- zenoh/src/publication.rs | 8 ++++---- zenoh/src/query.rs | 8 ++++---- zenoh/src/queryable.rs | 10 +++++----- zenoh/src/sample.rs | 2 +- zenoh/src/sample_builder.rs | 8 ++++---- zenoh/src/value.rs | 4 ++-- 11 files changed, 31 insertions(+), 37 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 74da23679f..c90bbe5ac1 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -420,7 +420,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { @@ -464,13 +464,7 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result { - session - .put(&key_expr, bytes) - .with_encoding(encoding) - .res() - .await - } + SampleKind::Put => session.put(&key_expr, bytes).encoding(encoding).res().await, SampleKind::Delete => session.delete(&key_expr).res().await, }; match res { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 973fb89abe..b2d2bdc399 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -129,7 +129,7 @@ impl AlignQueryable { AlignData::Data(k, (v, ts)) => { query .reply(k, v.payload) - .with_encoding(v.encoding) + .encoding(v.encoding) .timestamp(ts) .res() .await diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 9d5257e53f..6527d54c66 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -111,7 +111,7 @@ impl Aligner { payload, encoding, .. } = value; let sample = PutSampleBuilder::new(key, payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(ts) .res_sync(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 6d31c9710a..8e60ee320e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -315,7 +315,7 @@ impl StorageService { payload, encoding, .. 
} = data.value; PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(data.timestamp) .res_sync() } @@ -344,7 +344,7 @@ impl StorageService { .put( stripped_key, Value::new(sample_to_store.payload().clone()) - .with_encoding(sample_to_store.encoding().clone()), + .encoding(sample_to_store.encoding().clone()), *sample_to_store.timestamp().unwrap(), ) .await @@ -532,7 +532,7 @@ impl StorageService { } = entry.value; if let Err(e) = q .reply(key.clone(), payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(entry.timestamp) .res_async() .await @@ -567,7 +567,7 @@ impl StorageService { } = entry.value; if let Err(e) = q .reply(q.key_expr().clone(), payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(entry.timestamp) .res_async() .await @@ -719,7 +719,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).with_encoding(result.2); + let value = Value::new(payload).encoding(result.2); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index caeeb5c89b..070b3bcd3a 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -426,7 +426,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding)), + .map(|b| Value::from(b.payload).encoding(b.encoding)), qid: msg.id, zid, primitives, @@ -578,7 +578,7 @@ fn router_data(context: &AdminContext, query: Query) { }; if let Err(e) = query .reply(reply_key, payload) - .with_encoding(Encoding::APPLICATION_JSON) + .encoding(Encoding::APPLICATION_JSON) .res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 
0e93350222..8f52d5e4fa 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -199,14 +199,14 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } impl ValueBuilderTrait for PutBuilder<'_, '_> { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { encoding: encoding.into(), ..self } } - fn with_payload(self, payload: IntoPayload) -> Self + fn payload(self, payload: IntoPayload) -> Self where IntoPayload: Into, { @@ -782,14 +782,14 @@ impl SampleBuilderTrait for PutPublication<'_> { } impl ValueBuilderTrait for PutPublication<'_> { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { encoding: encoding.into(), ..self } } - fn with_payload(self, payload: IntoPayload) -> Self + fn payload(self, payload: IntoPayload) -> Self where IntoPayload: Into, { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 05f9a3557f..837ed69f22 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -169,13 +169,13 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { } impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { - fn with_encoding>(self, encoding: T) -> Self { - let value = Some(self.value.unwrap_or_default().with_encoding(encoding)); + fn encoding>(self, encoding: T) -> Self { + let value = Some(self.value.unwrap_or_default().encoding(encoding)); Self { value, ..self } } - fn with_payload>(self, payload: T) -> Self { - let value = Some(self.value.unwrap_or_default().with_payload(payload)); + fn payload>(self, payload: T) -> Self { + let value = Some(self.value.unwrap_or_default().payload(payload)); Self { value, ..self } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 4f478e1ce7..37f914d0e0 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -228,7 +228,7 @@ impl<'a> ReplySampleBuilder<'a> { query: self.query, sample_builder: self.sample_builder.into(), }; - builder.with_payload(payload) + 
builder.payload(payload) } pub fn delete(self) -> ReplyDelBuilder<'a> { ReplyDelBuilder { @@ -366,16 +366,16 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } impl ValueBuilderTrait for ReplyBuilder<'_> { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { - sample_builder: self.sample_builder.with_encoding(encoding), + sample_builder: self.sample_builder.encoding(encoding), ..self } } - fn with_payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { - sample_builder: self.sample_builder.with_payload(payload), + sample_builder: self.sample_builder.payload(payload), ..self } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 163ae2090a..813bc1c63e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -570,7 +570,7 @@ impl Sample { impl From for Value { fn from(sample: Sample) -> Self { - Value::new(sample.payload).with_encoding(sample.encoding) + Value::new(sample.payload).encoding(sample.encoding) } } diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index a113a9c953..0996f17cf9 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -56,9 +56,9 @@ pub trait SampleBuilderTrait { pub trait ValueBuilderTrait { /// Set the [`Encoding`] - fn with_encoding>(self, encoding: T) -> Self; + fn encoding>(self, encoding: T) -> Self; /// Sets the payload - fn with_payload>(self, payload: T) -> Self; + fn payload>(self, payload: T) -> Self; } #[derive(Debug)] @@ -213,13 +213,13 @@ impl QoSBuilderTrait for PutSampleBuilder { } impl ValueBuilderTrait for PutSampleBuilder { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self(SampleBuilder(Sample { encoding: encoding.into(), ..self.0 .0 })) } - fn with_payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self(SampleBuilder(Sample { payload: payload.into(), ..self.0 .0 diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs 
index 2e288c64ad..6d4de1366c 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -46,13 +46,13 @@ impl Value { } impl ValueBuilderTrait for Value { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { encoding: encoding.into(), ..self } } - fn with_payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { payload: payload.into(), ..self From 9809799b36a6210a9e1f2bbb5e5314540ddb0589 Mon Sep 17 00:00:00 2001 From: Alexander Date: Thu, 28 Mar 2024 13:25:01 +0100 Subject: [PATCH 067/598] Add protocol version to error message (#871) --- io/zenoh-transport/src/unicast/establishment/accept.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 72e676f6ec..7648f16e7d 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -167,9 +167,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Check if the version is supported if init_syn.version != input.mine_version { let e = zerror!( - "Rejecting InitSyn on {} because of unsupported Zenoh version from peer: {}", + "Rejecting InitSyn on {} because of unsupported Zenoh protocol version (expected: {}, received: {}) from: {}", self.link, - init_syn.zid + input.mine_version, + init_syn.version, + init_syn.zid, ); return Err((e.into(), Some(close::reason::INVALID))); } From c427ac732861fd775f1b275ca7948719f16fbad5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:42:52 +0100 Subject: [PATCH 068/598] resolvable removed from simple builders --- .../src/replica/aligner.rs | 5 +- .../src/replica/storage.rs | 29 ++++----- zenoh-ext/src/querying_subscriber.rs | 5 +- zenoh/src/queryable.rs | 9 +-- zenoh/src/sample.rs | 18 ------ zenoh/src/sample_builder.rs | 60 ++++--------------- 6 files changed, 32 insertions(+), 
94 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 6527d54c66..3a6cc0444d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -24,7 +24,6 @@ use zenoh::prelude::r#async::*; use zenoh::sample_builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; -use zenoh_core::{AsyncResolve, SyncResolve}; pub struct Aligner { session: Arc, @@ -113,7 +112,7 @@ impl Aligner { let sample = PutSampleBuilder::new(key, payload) .encoding(encoding) .timestamp(ts) - .res_sync(); + .into(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -331,7 +330,7 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res_async() + .res() .await { Ok(replies) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 8e60ee320e..9e9f8914d0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,6 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind}; use zenoh::sample_builder::{ @@ -34,7 +35,6 @@ use zenoh::value::Value; use zenoh::{Result as ZResult, Session, SessionDeclarations}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, 
StoredData}; -use zenoh_core::{AsyncResolve, SyncResolve}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -144,12 +144,7 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self - .session - .declare_subscriber(&self.key_expr) - .res_async() - .await - { + let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { Ok(storage_sub) => storage_sub, Err(e) => { log::error!("Error starting storage '{}': {}", self.name, e); @@ -162,7 +157,7 @@ impl StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res_async() + .res() .await { Ok(storage_queryable) => storage_queryable, @@ -239,7 +234,7 @@ impl StorageService { } }; let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).timestamp(timestamp).res_sync(); + let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); self.process_sample(sample).await; }, // on query on key_expr @@ -303,7 +298,7 @@ impl StorageService { ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. 
// get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = match self + let sample_to_store: Sample = match self .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { @@ -317,17 +312,17 @@ impl StorageService { PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) .encoding(encoding) .timestamp(data.timestamp) - .res_sync() + .into() } Some(Update { kind: SampleKind::Delete, data, }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) .timestamp(data.timestamp) - .res_sync(), + .into(), None => SampleBuilder::from(sample.clone()) .keyexpr(k.clone()) - .res_sync(), + .into(), }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { @@ -534,7 +529,7 @@ impl StorageService { .reply(key.clone(), payload) .encoding(encoding) .timestamp(entry.timestamp) - .res_async() + .res() .await { log::warn!( @@ -569,7 +564,7 @@ impl StorageService { .reply(q.key_expr().clone(), payload) .encoding(encoding) .timestamp(entry.timestamp) - .res_async() + .res() .await { log::warn!( @@ -584,7 +579,7 @@ impl StorageService { let err_message = format!("Storage '{}' raised an error on query: {}", self.name, e); log::warn!("{}", err_message); - if let Err(e) = q.reply_err(err_message).res_async().await { + if let Err(e) = q.reply_err(err_message).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -666,7 +661,7 @@ impl StorageService { .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) - .res_async() + .res() .await { Ok(replies) => replies, diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 52a4263396..728e9cfa51 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -665,8 +665,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { // ensure the sample has a timestamp, thus it will always 
be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let s = SampleBuilder::from(s).timestamp(timestamp).res_sync(); - state.merge_queue.push(s); + state + .merge_queue + .push(SampleBuilder::from(s).timestamp(timestamp).into()); } } }; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 37f914d0e0..a52c96c871 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -294,8 +294,7 @@ impl Resolvable for ReplySampleBuilder<'_> { impl SyncResolve for ReplySampleBuilder<'_> { fn res_sync(self) -> ::To { - let sample = self.sample_builder.res_sync(); - self.query._reply_sample(sample) + self.query._reply_sample(self.sample_builder.into()) } } @@ -453,8 +452,7 @@ impl<'a> Resolvable for ReplyBuilder<'a> { impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { - let sample = self.sample_builder.res_sync(); - self.query._reply_sample(sample) + self.query._reply_sample(self.sample_builder.into()) } } @@ -464,8 +462,7 @@ impl<'a> Resolvable for ReplyDelBuilder<'a> { impl SyncResolve for ReplyDelBuilder<'_> { fn res_sync(self) -> ::To { - let sample = self.sample_builder.res_sync(); - self.query._reply_sample(sample) + self.query._reply_sample(self.sample_builder.into()) } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 813bc1c63e..870b25768e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -22,7 +22,6 @@ use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; @@ -595,23 +594,6 @@ impl From for QoS { } } -impl Resolvable for QoSBuilder { - type To = QoS; -} - -impl SyncResolve for QoSBuilder { - fn res_sync(self) -> ::To { - self.0 - } -} - 
-impl AsyncResolve for QoSBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - futures::future::ready(self.0) - } -} - impl QoSBuilderTrait for QoSBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { let mut inner = self.0.inner; diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 0996f17cf9..f74afdf2b3 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -24,9 +24,6 @@ use crate::Sample; use crate::SampleKind; use uhlc::Timestamp; use zenoh_core::zresult; -use zenoh_core::AsyncResolve; -use zenoh_core::Resolvable; -use zenoh_core::SyncResolve; use zenoh_protocol::core::CongestionControl; pub trait QoSBuilderTrait { @@ -124,17 +121,17 @@ impl SampleBuilderTrait for SampleBuilder { impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.congestion_control(congestion_control).res_sync(); + let qos = qos.congestion_control(congestion_control).into(); Self(Sample { qos, ..self.0 }) } fn priority(self, priority: Priority) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.priority(priority).res_sync(); + let qos = qos.priority(priority).into(); Self(Sample { qos, ..self.0 }) } fn express(self, is_express: bool) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.express(is_express).res_sync(); + let qos = qos.express(is_express).into(); Self(Sample { qos, ..self.0 }) } } @@ -325,53 +322,20 @@ impl TryFrom for DeleteSampleBuilder { } } -impl Resolvable for SampleBuilder { - type To = Sample; -} - -impl Resolvable for PutSampleBuilder { - type To = Sample; -} - -impl Resolvable for DeleteSampleBuilder { - type To = Sample; -} - -impl SyncResolve for SampleBuilder { - fn res_sync(self) -> Self::To { - self.0 - } -} - -impl SyncResolve for PutSampleBuilder { - fn res_sync(self) -> Self::To { - 
self.0.res_sync() - } -} - -impl SyncResolve for DeleteSampleBuilder { - fn res_sync(self) -> Self::To { - self.0.res_sync() - } -} - -impl AsyncResolve for SampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - futures::future::ready(self.0) +impl From for Sample { + fn from(sample_builder: SampleBuilder) -> Self { + sample_builder.0 } } -impl AsyncResolve for PutSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() +impl From for Sample { + fn from(put_sample_builder: PutSampleBuilder) -> Self { + put_sample_builder.0 .0 } } -impl AsyncResolve for DeleteSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() +impl From for Sample { + fn from(delete_sample_builder: DeleteSampleBuilder) -> Self { + delete_sample_builder.0 .0 } } From 6c6050b477a9f69040bd0f67748e15b7eeca242a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 15:15:26 +0100 Subject: [PATCH 069/598] Fix cargo clippy --- zenoh/tests/connection_retry.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index db84d7bd5d..4f789e6db1 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -36,9 +36,7 @@ fn retry_config_overriding() { .insert_json5("listen/exit_on_failure", "false") .unwrap(); - let expected = vec![ - // global value - ConnectionRetryConf { + let expected = [ConnectionRetryConf { period_init_ms: 3000, period_max_ms: 6000, period_increase_factor: 1.5, @@ -57,8 +55,7 @@ fn retry_config_overriding() { period_max_ms: 60000, period_increase_factor: 15., exit_on_failure: true, - }, - ]; + }]; for (i, endpoint) in config.listen().endpoints().iter().enumerate() { let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true); From 1a2ba1a75358d3703265dccbb3707680988a2647 Mon Sep 17 
00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 15:21:43 +0100 Subject: [PATCH 070/598] Fix code format --- zenoh/tests/connection_retry.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 4f789e6db1..fcb071b489 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -36,7 +36,8 @@ fn retry_config_overriding() { .insert_json5("listen/exit_on_failure", "false") .unwrap(); - let expected = [ConnectionRetryConf { + let expected = [ + ConnectionRetryConf { period_init_ms: 3000, period_max_ms: 6000, period_increase_factor: 1.5, @@ -55,7 +56,8 @@ fn retry_config_overriding() { period_max_ms: 60000, period_increase_factor: 15., exit_on_failure: true, - }]; + }, + ]; for (i, endpoint) in config.listen().endpoints().iter().enumerate() { let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true); From 7162ff13f34a27ff7455b447536522adc23bf7a5 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 15:43:35 +0100 Subject: [PATCH 071/598] Fix cargo clippy --- zenoh/src/sample.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 5f0234f723..2af8fb7106 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -69,8 +69,12 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] -#[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] fn source_info_stack_size() { + use crate::{ + sample::{SourceInfo, SourceSn}, + ZenohId, + }; + assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); assert_eq!(std::mem::size_of::>(), 16); From 10baf8c9cf6050dc6c7f682a3d444710fdb93aea Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:04:11 +0100 Subject: [PATCH 072/598] doctests fixed --- zenoh/src/publication.rs | 6 +++--- zenoh/src/session.rs | 5 +++-- 2 files changed, 6 insertions(+), 5 
deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 8f52d5e4fa..f8f15eca56 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -73,12 +73,12 @@ pub struct DeleteBuilder<'a, 'b> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; +/// use zenoh::sample_builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session /// .put("key/expression", "payload") -/// .with_encoding(Encoding::TEXT_PLAIN) +/// .encoding(Encoding::TEXT_PLAIN) /// .congestion_control(CongestionControl::Block) /// .res() /// .await @@ -932,7 +932,7 @@ impl<'a> Sink for Publisher<'a> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; +/// use zenoh::sample_builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 5b80adb0e5..cc30e12293 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -683,12 +683,13 @@ impl Session { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::sample_builder::PutSampleBuilderTrait; + /// use zenoh::sample_builder::SampleBuilderTrait; + /// use zenoh::sample_builder::ValueBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session /// .put("key/expression", "payload") - /// .with_encoding(Encoding::TEXT_PLAIN) + /// .encoding(Encoding::TEXT_PLAIN) /// .res() /// .await /// .unwrap(); From 48cb96ba7ab43c13a212fe4bb5943edb38089b9b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:41:55 +0100 Subject: [PATCH 073/598] 
sample bulider in separarte module --- zenoh/src/{sample_builder.rs => sample/builder.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{sample_builder.rs => sample/builder.rs} (100%) diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample/builder.rs similarity index 100% rename from zenoh/src/sample_builder.rs rename to zenoh/src/sample/builder.rs From ddb93a2364bbe4db227d54b1107539b717fa0d83 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:42:05 +0100 Subject: [PATCH 074/598] separate module --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 4 ++-- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 4 ++-- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/lib.rs | 1 - zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 4 ++++ zenoh/src/publication.rs | 10 +++++----- zenoh/src/query.rs | 2 +- zenoh/src/queryable.rs | 4 ++-- zenoh/src/sample.rs | 4 +++- zenoh/src/session.rs | 3 +-- zenoh/src/value.rs | 2 +- zenoh/tests/attachments.rs | 4 ++-- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 2 +- zenoh/tests/unicity.rs | 2 +- 26 files changed, 38 insertions(+), 34 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index b40afc1f53..59bcaddadc 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -16,7 +16,7 @@ use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git 
a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 0003958b5d..e0fa079629 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -15,7 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7166981e72..c4c592b47c 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,7 +16,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::SampleBuilderTrait; +use zenoh::sample::builder::SampleBuilderTrait; use zenoh_examples::CommonArgs; #[async_std::main] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 5230ea3ce6..a784429906 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -15,7 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 7e7c1ac9b5..78d54111a8 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,7 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 48f152e488..c353826fab 100644 --- 
a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c90bbe5ac1..94796c518d 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,7 +34,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample_builder::ValueBuilderTrait; +use zenoh::sample::builder::ValueBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index b2d2bdc399..729572601c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::TimestampBuilderTrait; -use zenoh::sample_builder::ValueBuilderTrait; +use zenoh::sample::builder::TimestampBuilderTrait; +use zenoh::sample::builder::ValueBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3a6cc0444d..1b7f945cee 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9e9f8914d0..62468ac6a1 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -26,10 +26,10 @@ use zenoh::buffers::ZBuf; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::{Sample, SampleKind}; -use zenoh::sample_builder::{ +use zenoh::sample::builder::{ DeleteSampleBuilder, PutSampleBuilder, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait, }; +use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; use zenoh::{Result as ZResult, Session, SessionDeclarations}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 39cd982c41..4ae3c77c9f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,7 +29,7 @@ use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh::Session; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 728e9cfa51..5e80cb704c 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 
@@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilder, TimestampBuilderTrait}; +use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 24b21496ec..ed2f01f180 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -145,7 +145,6 @@ pub mod publication; pub mod query; pub mod queryable; pub mod sample; -pub mod sample_builder; pub mod subscriber; pub mod value; #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 070b3bcd3a..41295f6cd0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,7 +20,7 @@ use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample_builder::ValueBuilderTrait; +use crate::sample::builder::ValueBuilderTrait; use crate::value::Value; use async_std::task; use log::{error, trace}; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 26c93e1801..850148f506 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -60,6 +60,10 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; + + pub use crate::sample::builder::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }; } /// Prelude to import when using Zenoh's sync API. 
diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f8f15eca56..d2463610fb 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -15,12 +15,12 @@ //! Publishing primitives. use crate::net::primitives::Primitives; use crate::prelude::*; +use crate::sample::builder::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, +}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; -use crate::sample_builder::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -}; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] @@ -73,7 +73,7 @@ pub struct DeleteBuilder<'a, 'b> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::{ValueBuilderTrait, QoSBuilderTrait}; +/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session @@ -932,7 +932,7 @@ impl<'a> Sink for Publisher<'a> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::QoSBuilderTrait; +/// use zenoh::sample::builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 837ed69f22..3a7ee771b3 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -15,10 +15,10 @@ //! Query primitives. 
use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; +use crate::sample::builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::QoSBuilder; -use crate::sample_builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; use crate::Session; use std::collections::HashMap; use std::future::Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a52c96c871..2e3a1f585a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,11 +18,11 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::SourceInfo; -use crate::sample_builder::{ +use crate::sample::builder::{ DeleteSampleBuilder, PutSampleBuilder, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 870b25768e..455d54318b 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -16,7 +16,7 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; -use crate::sample_builder::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] @@ -26,6 +26,8 @@ use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; +pub mod builder; + pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. 
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index cc30e12293..2f24673b5e 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -683,8 +683,7 @@ impl Session { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::sample_builder::SampleBuilderTrait; - /// use zenoh::sample_builder::ValueBuilderTrait; + /// use zenoh::prelude::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 6d4de1366c..8ea5aef19f 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,7 +13,7 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample_builder::ValueBuilderTrait}; +use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index e87fc5243b..f50e33cf6f 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, sample_builder::SampleBuilderTrait}; + use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -61,7 +61,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment, sample_builder::SampleBuilderTrait}; + use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 8dc39423cb..46896e5432 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -15,7 +15,7 @@ use async_std::prelude::FutureExt; use async_std::task; use 
std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{publication::Priority, SessionDeclarations}; use zenoh_core::zasync_executor_init; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 123550852e..6585f8aae4 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -20,7 +20,7 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{value::Value, Result}; use zenoh_core::zasync_executor_init; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 955ec7a73f..436643ac25 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,7 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 3d1327398d..80f722205b 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -17,7 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); From ab96aab5345e7556c0c6ae1329c46efe45a31b63 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:53:43 +0100 Subject: [PATCH 075/598] SampleBuilder put/delete --- .../src/replica/aligner.rs | 4 +-- .../src/replica/storage.rs | 8 +++--- 
zenoh/src/queryable.rs | 4 +-- zenoh/src/sample/builder.rs | 27 +++++++++---------- 4 files changed, 20 insertions(+), 23 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 1b7f945cee..5121f0b445 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; @@ -109,7 +109,7 @@ impl Aligner { let Value { payload, encoding, .. } = value; - let sample = PutSampleBuilder::new(key, payload) + let sample = SampleBuilder::put(key, payload) .encoding(encoding) .timestamp(ts) .into(); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 62468ac6a1..feebfb588a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -26,9 +26,7 @@ use zenoh::buffers::ZBuf; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::{ - DeleteSampleBuilder, PutSampleBuilder, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait, -}; +use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; @@ -309,7 +307,7 @@ impl StorageService { let Value { payload, encoding, .. 
} = data.value; - PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) + SampleBuilder::put(KeyExpr::from(k.clone()), payload) .encoding(encoding) .timestamp(data.timestamp) .into() @@ -317,7 +315,7 @@ impl StorageService { Some(Update { kind: SampleKind::Delete, data, - }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) + }) => SampleBuilder::delete(KeyExpr::from(k.clone())) .timestamp(data.timestamp) .into(), None => SampleBuilder::from(sample.clone()) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 2e3a1f585a..c9492394c4 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -137,8 +137,8 @@ impl Query { IntoKeyExpr: Into>, IntoPayload: Into, { - let sample_builder = PutSampleBuilder::new(key_expr, payload) - .with_qos(response::ext::QoSType::RESPONSE.into()); + let sample_builder = + SampleBuilder::put(key_expr, payload).with_qos(response::ext::QoSType::RESPONSE.into()); ReplyBuilder { query: self, sample_builder, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index f74afdf2b3..8c507c8119 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -62,22 +62,21 @@ pub trait ValueBuilderTrait { pub struct SampleBuilder(Sample); impl SampleBuilder { - pub fn new(key_expr: IntoKeyExpr) -> Self + pub fn put( + key_expr: IntoKeyExpr, + payload: IntoPayload, + ) -> PutSampleBuilder where IntoKeyExpr: Into>, + IntoPayload: Into, { - Self(Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::default(), - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - }) + PutSampleBuilder::new(key_expr, payload) + } + pub fn delete(key_expr: IntoKeyExpr) -> DeleteSampleBuilder + where + IntoKeyExpr: Into>, + { + DeleteSampleBuilder::new(key_expr) } /// Allows to change keyexpr of [`Sample`] pub fn keyexpr(self, key_expr: 
IntoKeyExpr) -> Self @@ -149,7 +148,7 @@ impl From for PutSampleBuilder { } impl PutSampleBuilder { - pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self + fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, IntoPayload: Into, From 82c1c999d0f73cc2cc09121e56067591971f5146 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:28:56 +0100 Subject: [PATCH 076/598] set value api --- examples/examples/z_get.rs | 21 +++++++----- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh/src/publication.rs | 17 ++++++++++ zenoh/src/query.rs | 49 ++++++++++++---------------- zenoh/src/queryable.rs | 7 ++++ zenoh/src/sample/builder.rs | 12 +++++++ zenoh/src/value.rs | 18 ++++++++++ zenoh/tests/attachments.rs | 6 ++-- zenoh/tests/handler.rs | 4 +-- 9 files changed, 93 insertions(+), 43 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index dce74d367b..074f931eff 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -28,15 +28,18 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Sending Query '{selector}'..."); - let replies = match value { - Some(value) => session.get(&selector).with_value(value), - None => session.get(&selector), - } - .target(target) - .timeout(timeout) - .res() - .await - .unwrap(); + // let replies = match value { + // Some(value) => session.get(&selector).payload(value), + // None => session.get(&selector), + // } + let replies = session + .get(&selector) + .value(value.map(Value::from)) + .target(target) + .timeout(timeout) + .res() + .await + .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 94796c518d..f78c541eff 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -420,7 +420,7 @@ async fn query(mut req: Request<(Arc, 
String)>) -> tide::Result { diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index d2463610fb..103a65e782 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -215,6 +215,14 @@ impl ValueBuilderTrait for PutBuilder<'_, '_> { ..self } } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + payload, + encoding, + ..self + } + } } impl PutBuilder<'_, '_> { @@ -798,6 +806,15 @@ impl ValueBuilderTrait for PutPublication<'_> { ..self } } + + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + payload, + encoding, + ..self + } + } } impl TimestampBuilderTrait for DeletePublication<'_> { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 3a7ee771b3..5a1d443463 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -178,6 +178,13 @@ impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { let value = Some(self.value.unwrap_or_default().payload(payload)); Self { value, ..self } } + fn value>(self, value: T) -> Self { + let value: Value = value.into(); + Self { + value: if value.is_empty() { None } else { Some(value) }, + ..self + } + } } impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { @@ -328,48 +335,34 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { /// Change the target of the query. #[inline] - pub fn target(mut self, target: QueryTarget) -> Self { - self.target = target; - self + pub fn target(self, target: QueryTarget) -> Self { + Self { target, ..self } } /// Change the consolidation mode of the query. 
#[inline] - pub fn consolidation>(mut self, consolidation: QC) -> Self { - self.consolidation = consolidation.into(); - self + pub fn consolidation>(self, consolidation: QC) -> Self { + Self { + consolidation: consolidation.into(), + ..self + } } /// Restrict the matching queryables that will receive the query /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self + pub fn allowed_destination(self, destination: Locality) -> Self { + Self { + destination, + ..self + } } /// Set query timeout. #[inline] - pub fn timeout(mut self, timeout: Duration) -> Self { - self.timeout = timeout; - self - } - - /// Set query value. - #[inline] - pub fn with_value(mut self, value: IntoValue) -> Self - where - IntoValue: Into, - { - self.value = Some(value.into()); - self - } - - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self + pub fn timeout(self, timeout: Duration) -> Self { + Self { timeout, ..self } } /// By default, `get` guarantees that it will only receive replies whose key expressions intersect diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index c9492394c4..aa5f041a2b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -378,6 +378,13 @@ impl ValueBuilderTrait for ReplyBuilder<'_> { ..self } } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + sample_builder: self.sample_builder.payload(payload).encoding(encoding), + ..self + } + } } /// A builder returned by [`Query::reply_del()`](Query::reply) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 8c507c8119..1bd50e7f69 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -22,6 +22,7 @@ use crate::Payload; use crate::Priority; use 
crate::Sample; use crate::SampleKind; +use crate::Value; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; @@ -56,6 +57,9 @@ pub trait ValueBuilderTrait { fn encoding>(self, encoding: T) -> Self; /// Sets the payload fn payload>(self, payload: T) -> Self; + /// Sets both payload and encoding at once. + /// This is convenient for passing user type which supports `Into` when both payload and encoding depends on user type + fn value>(self, value: T) -> Self; } #[derive(Debug)] @@ -221,6 +225,14 @@ impl ValueBuilderTrait for PutSampleBuilder { ..self.0 .0 })) } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self(SampleBuilder(Sample { + payload, + encoding, + ..self.0 .0 + })) + } } #[derive(Debug)] diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 8ea5aef19f..92a87cb6c5 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -43,6 +43,11 @@ impl Value { encoding: Encoding::default(), } } + /// Checks if the [`Value`] is empty. + /// Value is considered empty if its payload is empty and encoding is default. 
+ pub fn is_empty(&self) -> bool { + self.payload.is_empty() && self.encoding == Encoding::default() + } } impl ValueBuilderTrait for Value { @@ -58,6 +63,10 @@ impl ValueBuilderTrait for Value { ..self } } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { payload, encoding } + } } impl From for Value @@ -72,6 +81,15 @@ where } } +impl From> for Value +where + T: Into, +{ + fn from(t: Option) -> Self { + t.map_or_else(Value::empty, Into::into) + } +} + impl Default for Value { fn default() -> Self { Value::empty() diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index f50e33cf6f..2725351ab0 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -100,13 +100,13 @@ fn queries() { } let get = zenoh .get("test/attachment") - .with_value("query") - .with_attachment( + .payload("query") + .attachment(Some( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - ) + )) .res() .unwrap(); while let Ok(reply) = get.recv() { diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index c1e912fc75..ceed15e2c3 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -57,12 +57,12 @@ fn query_with_ringbuffer() { let _reply1 = zenoh .get("test/ringbuffer_query") - .with_value("query1") + .payload("query1") .res() .unwrap(); let _reply2 = zenoh .get("test/ringbuffer_query") - .with_value("query2") + .payload("query2") .res() .unwrap(); From b5a1f6b1eb3fd3310f233d54abc9135449d4630a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:32:48 +0100 Subject: [PATCH 077/598] with removed --- zenoh/src/queryable.rs | 2 +- zenoh/src/sample/builder.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index aa5f041a2b..aec45c46df 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -138,7 +138,7 @@ impl Query { IntoPayload: Into, { let sample_builder = - 
SampleBuilder::put(key_expr, payload).with_qos(response::ext::QoSType::RESPONSE.into()); + SampleBuilder::put(key_expr, payload).qos(response::ext::QoSType::RESPONSE.into()); ReplyBuilder { query: self, sample_builder, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 1bd50e7f69..920bd2b7b7 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -171,14 +171,14 @@ impl PutSampleBuilder { })) } /// Allows to change keyexpr of [`Sample`] - pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { Self(self.0.keyexpr(key_expr)) } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. - pub(crate) fn with_qos(self, qos: QoS) -> Self { + pub(crate) fn qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) } } From 1c9515704f25020468a22bf0dfe52d8cc0fb17cb Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:37:48 +0100 Subject: [PATCH 078/598] commented code removed --- examples/examples/z_get.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 074f931eff..259137ee4a 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -28,10 +28,6 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Sending Query '{selector}'..."); - // let replies = match value { - // Some(value) => session.get(&selector).payload(value), - // None => session.get(&selector), - // } let replies = session .get(&selector) .value(value.map(Value::from)) From d9eb96a8d86c232513f6c93b1d8a3d2f57ef9f1a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:45:07 +0100 Subject: [PATCH 079/598] map-from removed --- examples/examples/z_get.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 259137ee4a..542f94ba63 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -30,7 +30,7 @@ async fn main() { println!("Sending Query '{selector}'..."); let replies = session .get(&selector) - .value(value.map(Value::from)) + .value(value) .target(target) .timeout(timeout) .res() From e4501f403f11837a9d143dc9f3f91801498b33fa Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 18:01:14 +0100 Subject: [PATCH 080/598] build warnings fixed --- zenoh/tests/routing.rs | 16 +--------------- zenoh/tests/session.rs | 1 - zenoh/tests/unicity.rs | 1 - 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 056680ffd4..830f22a475 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -11,29 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use futures::future::try_join_all; -use futures::FutureExt as _; use std::str::FromStr; use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; -use std::{ - str::FromStr, - sync::{atomic::AtomicUsize, atomic::Ordering, Arc}, - time::Duration, -}; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh::{ - config::{Config, ModeDependentValue}, - prelude::r#async::*, - Result, -}; -use zenoh::{value::Value, Result}; -use zenoh_core::zasync_executor_init; +use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; use zenoh_result::bail; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 640f23da52..cd7335c28e 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; use 
zenoh::sample::builder::QoSBuilderTrait; -use zenoh_core::zasync_executor_init; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f92a26d6c0..a71a0a8034 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -17,7 +17,6 @@ use std::time::Duration; use tokio::runtime::Handle; use zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh_core::zasync_executor_init; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); From 1562a17b7a8a515e4a4ef98be7b23e9da47fbd48 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 19:14:00 +0100 Subject: [PATCH 081/598] Protocol interest (#870) * Add InterestId in Declare message * Improve comments * Update commons/zenoh-protocol/src/network/declare.rs Co-authored-by: Mahmoud Mazouz * Update commons/zenoh-protocol/src/network/declare.rs Co-authored-by: Mahmoud Mazouz --------- Co-authored-by: Mahmoud Mazouz --- commons/zenoh-codec/src/network/declare.rs | 17 ++++++++++++++++- commons/zenoh-protocol/src/network/declare.rs | 17 +++++++++++------ zenoh/src/key_expr.rs | 1 + zenoh/src/net/routing/dispatcher/resource.rs | 1 + zenoh/src/net/routing/hat/client/pubsub.rs | 4 ++++ zenoh/src/net/routing/hat/client/queries.rs | 3 +++ .../net/routing/hat/linkstate_peer/pubsub.rs | 6 ++++++ .../net/routing/hat/linkstate_peer/queries.rs | 6 ++++++ zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 4 ++++ zenoh/src/net/routing/hat/p2p_peer/queries.rs | 3 +++ zenoh/src/net/routing/hat/router/pubsub.rs | 10 ++++++++++ zenoh/src/net/routing/hat/router/queries.rs | 10 ++++++++++ zenoh/src/net/runtime/adminspace.rs | 3 +++ zenoh/src/net/tests/tables.rs | 5 +++++ zenoh/src/session.rs | 7 +++++++ 15 files changed, 90 insertions(+), 7 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index c81514ab3e..d7a25ea0a9 100644 --- 
a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -95,6 +95,7 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -103,6 +104,9 @@ where // Header let mut header = id::DECLARE; + if x.interest_id.is_some() { + header |= declare::flag::I; + } let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); @@ -111,6 +115,11 @@ where } self.write(&mut *writer, header)?; + // Body + if let Some(interest_id) = interest_id { + self.write(&mut *writer, interest_id)?; + } + // Extensions if ext_qos != &declare::ext::QoSType::DEFAULT { n_exts -= 1; @@ -157,6 +166,11 @@ where return Err(DidntRead); } + let mut interest_id = None; + if imsg::has_flag(self.header, declare::flag::I) { + interest_id = Some(self.codec.read(&mut *reader)?); + } + // Extensions let mut ext_qos = declare::ext::QoSType::DEFAULT; let mut ext_tstamp = None; @@ -192,10 +206,11 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - body, + interest_id, ext_qos, ext_tstamp, ext_nodeid, + body, }) } } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index d41d8bf67f..10027259c2 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -25,20 +25,22 @@ pub use subscriber::*; pub use token::*; pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const I: u8 = 1 << 5; // 0x20 Interest if I==1 then the declare is in a response to an Interest with future==false + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } /// Flags: -/// - X: Reserved +/// - I: Interest If I==1 then the declare is in a response to an 
Interest with future==false /// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|X| DECLARE | +/// |Z|X|I| DECLARE | /// +-+-+-+---------+ +/// ~interest_id:z32~ if I==1 +/// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ @@ -46,6 +48,7 @@ pub mod flag { /// #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { + pub interest_id: Option, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -132,16 +135,18 @@ impl Declare { let mut rng = rand::thread_rng(); - let body = DeclareBody::rand(); + let interest_id = rng.gen_bool(0.5).then_some(rng.gen::()); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); + let body = DeclareBody::rand(); Self { - body, + interest_id, ext_qos, ext_tstamp, ext_nodeid, + body, } } } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index f340f24cf1..aaa1d13724 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -664,6 +664,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 0450dab38a..194b97fca8 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -452,6 +452,7 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git 
a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 290f90f95f..e85bb77bf9 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -53,6 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -136,6 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -169,6 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -203,6 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 2ac3f1b993..5c0bc5349b 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -93,6 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -164,6 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = 
face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -414,6 +418,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -455,6 +460,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 9fba744a9c..150c12a632 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -126,6 +126,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -169,6 +170,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -337,6 +339,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -362,6 +365,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, 
face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index a722176292..b495248788 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -53,6 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -136,6 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -169,6 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -203,6 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 38f77bec45..72c32b9217 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -93,6 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + 
interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -164,6 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -408,6 +412,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -559,6 +564,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -600,6 +606,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -628,6 +635,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -766,6 +774,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ 
-791,6 +800,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 61abaa7c55..99e787beb5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -194,6 +194,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -247,6 +248,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -471,6 +473,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -496,6 +499,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -768,6 +775,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -866,6 +874,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, 
links if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -891,6 +900,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 166ff16bd0..d460ee3f1c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -276,6 +276,8 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -287,6 +289,7 @@ impl AdminSpace { }); primitives.send_declare(Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 516bcd0109..4067f2ad8f 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -579,6 +579,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -606,6 +607,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -627,6 +629,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -654,6 +657,7 @@ fn client_test() { 
Primitives::send_declare( primitives1.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -675,6 +679,7 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index b9e20a4e68..addb757807 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -872,6 +872,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1084,6 +1085,7 @@ impl Session { // }; primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1140,6 +1142,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1191,6 +1194,7 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1212,6 +1216,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1247,6 +1252,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: 
declare::ext::NodeIdType::DEFAULT, @@ -1271,6 +1277,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, From 21fb0832d9cfa904bf787ef9d511572b5ce81755 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 10:43:07 +0100 Subject: [PATCH 082/598] Protocol batchsize (#873) * Use BatchSize typedef instead of u16 * Use BatchSize typedef instead of u16 for vsock --- commons/zenoh-codec/src/core/zint.rs | 68 ++++++++++--------- commons/zenoh-protocol/src/transport/init.rs | 4 +- commons/zenoh-protocol/src/transport/join.rs | 2 +- commons/zenoh-protocol/src/transport/mod.rs | 1 + io/zenoh-link-commons/src/lib.rs | 3 +- io/zenoh-link-commons/src/multicast.rs | 4 +- io/zenoh-link-commons/src/unicast.rs | 7 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 13 ++-- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-serial/src/lib.rs | 7 +- .../zenoh-link-serial/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-tcp/src/lib.rs | 5 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-tls/src/lib.rs | 13 ++-- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 4 +- io/zenoh-links/zenoh-link-udp/src/lib.rs | 11 +-- .../zenoh-link-udp/src/multicast.rs | 3 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 3 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 5 +- .../zenoh-link-unixsock_stream/src/lib.rs | 5 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-vsock/src/lib.rs | 4 +- .../zenoh-link-vsock/src/unicast.rs | 8 ++- io/zenoh-links/zenoh-link-ws/src/lib.rs | 5 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 3 +- io/zenoh-transport/src/common/pipeline.rs | 12 ++-- io/zenoh-transport/src/manager.rs | 6 +- .../src/unicast/establishment/cookie.rs | 9 ++- 28 files changed, 125 insertions(+), 92 deletions(-) diff 
--git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 0daff7348b..d5160e2ee6 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -17,38 +17,42 @@ use zenoh_buffers::{ writer::{DidntWrite, Writer}, }; -const VLE_LEN: usize = 9; +const VLE_LEN_MAX: usize = vle_len(u64::MAX); + +const fn vle_len(x: u64) -> usize { + const B1: u64 = u64::MAX << 7; + const B2: u64 = u64::MAX << (7 * 2); + const B3: u64 = u64::MAX << (7 * 3); + const B4: u64 = u64::MAX << (7 * 4); + const B5: u64 = u64::MAX << (7 * 5); + const B6: u64 = u64::MAX << (7 * 6); + const B7: u64 = u64::MAX << (7 * 7); + const B8: u64 = u64::MAX << (7 * 8); + + if (x & B1) == 0 { + 1 + } else if (x & B2) == 0 { + 2 + } else if (x & B3) == 0 { + 3 + } else if (x & B4) == 0 { + 4 + } else if (x & B5) == 0 { + 5 + } else if (x & B6) == 0 { + 6 + } else if (x & B7) == 0 { + 7 + } else if (x & B8) == 0 { + 8 + } else { + 9 + } +} impl LCodec for Zenoh080 { fn w_len(self, x: u64) -> usize { - const B1: u64 = u64::MAX << 7; - const B2: u64 = u64::MAX << (7 * 2); - const B3: u64 = u64::MAX << (7 * 3); - const B4: u64 = u64::MAX << (7 * 4); - const B5: u64 = u64::MAX << (7 * 5); - const B6: u64 = u64::MAX << (7 * 6); - const B7: u64 = u64::MAX << (7 * 7); - const B8: u64 = u64::MAX << (7 * 8); - - if (x & B1) == 0 { - 1 - } else if (x & B2) == 0 { - 2 - } else if (x & B3) == 0 { - 3 - } else if (x & B4) == 0 { - 4 - } else if (x & B5) == 0 { - 5 - } else if (x & B6) == 0 { - 6 - } else if (x & B7) == 0 { - 7 - } else if (x & B8) == 0 { - 8 - } else { - 9 - } + vle_len(x) } } @@ -107,7 +111,7 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, mut x: u64) -> Self::Output { - writer.with_slot(VLE_LEN, move |buffer| { + writer.with_slot(VLE_LEN_MAX, move |buffer| { let mut len = 0; while (x & !0x7f_u64) != 0 { // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is @@ -122,7 +126,7 @@ where } // In case len 
== VLE_LEN then all the bits have already been written in the latest iteration. // Else we haven't written all the necessary bytes yet. - if len != VLE_LEN { + if len != VLE_LEN_MAX { // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is // the maximum number of bytes a VLE can take once encoded. // I.e.: x is shifted 7 bits to the right every iteration, @@ -151,7 +155,7 @@ where let mut v = 0; let mut i = 0; // 7 * VLE_LEN is beyond the maximum number of shift bits - while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN - 1) { + while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN_MAX - 1) { v |= ((b & 0x7f_u8) as u64) << i; b = reader.read_u8()?; i += 7; diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index 1327288471..de517a353c 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -165,7 +165,7 @@ impl InitSyn { let whatami = WhatAmI::rand(); let zid = ZenohId::default(); let resolution = Resolution::rand(); - let batch_size: u16 = rng.gen(); + let batch_size: BatchSize = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -221,7 +221,7 @@ impl InitAck { } else { Resolution::rand() }; - let batch_size: u16 = rng.gen(); + let batch_size: BatchSize = rng.gen(); let cookie = ZSlice::rand(64); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index c5fbb98430..a5cf1422a6 100644 --- a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -141,7 +141,7 @@ impl Join { let whatami = WhatAmI::rand(); let zid = ZenohId::default(); let resolution = Resolution::rand(); - let batch_size: u16 = 
rng.gen(); + let batch_size: BatchSize = rng.gen(); let lease = if rng.gen_bool(0.5) { Duration::from_secs(rng.gen()) } else { diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index 1ea6fca144..e92860f441 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -39,6 +39,7 @@ use crate::network::NetworkMessage; /// the boundary of the serialized messages. The length is encoded as little-endian. /// In any case, the length of a message must not exceed 65_535 bytes. pub type BatchSize = u16; +pub type AtomicBatchSize = core::sync::atomic::AtomicU16; pub mod batch_size { use super::BatchSize; diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index f9ad7166ee..138726fd4f 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -32,6 +32,7 @@ pub use multicast::*; use serde::Serialize; pub use unicast::*; use zenoh_protocol::core::Locator; +use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; /*************************************/ @@ -45,7 +46,7 @@ pub struct Link { pub src: Locator, pub dst: Locator, pub group: Option, - pub mtu: u16, + pub mtu: BatchSize, pub is_reliable: bool, pub is_streamed: bool, pub interfaces: Vec, diff --git a/io/zenoh-link-commons/src/multicast.rs b/io/zenoh-link-commons/src/multicast.rs index 65bc7195b6..ccfe6842c1 100644 --- a/io/zenoh-link-commons/src/multicast.rs +++ b/io/zenoh-link-commons/src/multicast.rs @@ -22,7 +22,7 @@ use zenoh_buffers::{reader::HasReader, writer::HasWriter}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ core::{EndPoint, Locator}, - transport::TransportMessage, + transport::{BatchSize, TransportMessage}, }; use zenoh_result::{zerror, ZResult}; @@ -44,7 +44,7 @@ pub struct LinkMulticast(pub Arc); #[async_trait] pub trait LinkMulticastTrait: Send + Sync { - fn get_mtu(&self) -> u16; + fn get_mtu(&self) -> 
BatchSize; fn get_src(&self) -> &Locator; fn get_dst(&self) -> &Locator; fn is_reliable(&self) -> bool; diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index fe87e70e94..c21f4a008c 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -19,7 +19,10 @@ use core::{ ops::Deref, }; use std::net::SocketAddr; -use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; pub type LinkManagerUnicast = Arc; @@ -41,7 +44,7 @@ pub struct LinkUnicast(pub Arc); #[async_trait] pub trait LinkUnicastTrait: Send + Sync { - fn get_mtu(&self) -> u16; + fn get_mtu(&self) -> BatchSize; fn get_src(&self) -> &Locator; fn get_dst(&self) -> &Locator; fn is_reliable(&self) -> bool; diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index c6d7e16087..4bcabaf5b6 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -28,9 +28,12 @@ use std::net::SocketAddr; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{ - endpoint::{Address, Parameters}, - Locator, +use zenoh_protocol::{ + core::{ + endpoint::{Address, Parameters}, + Locator, + }, + transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -47,7 +50,7 @@ pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the QUIC MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const QUIC_MAX_MTU: u16 = u16::MAX; +const QUIC_MAX_MTU: BatchSize = BatchSize::MAX; pub const QUIC_LOCATOR_PREFIX: &str = "quic"; #[derive(Default, Clone, Copy, Debug)] @@ -137,7 +140,7 @@ impl ConfigurationInspector for QuicConfigurator { zconfigurable! 
{ // Default MTU (QUIC PDU) in bytes. - static ref QUIC_DEFAULT_MTU: u16 = QUIC_MAX_MTU; + static ref QUIC_DEFAULT_MTU: BatchSize = QUIC_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 33953d666d..14a01861ca 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -34,6 +34,7 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, ZError, ZResult}; pub struct LinkUnicastQuic { @@ -135,7 +136,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *QUIC_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-serial/src/lib.rs b/io/zenoh-links/zenoh-link-serial/src/lib.rs index fb4d7fcc12..f7b0b7afeb 100644 --- a/io/zenoh-links/zenoh-link-serial/src/lib.rs +++ b/io/zenoh-links/zenoh-link-serial/src/lib.rs @@ -25,10 +25,11 @@ pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; // Maximum MTU (Serial PDU) in bytes. -const SERIAL_MAX_MTU: u16 = z_serial::MAX_MTU as u16; +const SERIAL_MAX_MTU: BatchSize = z_serial::MAX_MTU as BatchSize; const DEFAULT_BAUDRATE: u32 = 9_600; @@ -36,11 +37,11 @@ const DEFAULT_EXCLUSIVE: bool = true; pub const SERIAL_LOCATOR_PREFIX: &str = "serial"; -const SERIAL_MTU_LIMIT: u16 = SERIAL_MAX_MTU; +const SERIAL_MTU_LIMIT: BatchSize = SERIAL_MAX_MTU; zconfigurable! 
{ // Default MTU (UDP PDU) in bytes. - static ref SERIAL_DEFAULT_MTU: u16 = SERIAL_MTU_LIMIT; + static ref SERIAL_DEFAULT_MTU: BatchSize = SERIAL_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref SERIAL_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 0efa40ee90..0a5bea3c18 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -30,6 +30,7 @@ use zenoh_link_commons::{ NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; use z_serial::ZSerial; @@ -177,7 +178,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *SERIAL_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 1a7d6ae705..0b075d9bf8 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -22,6 +22,7 @@ use std::net::SocketAddr; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; mod unicast; @@ -33,7 +34,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const TCP_MAX_MTU: u16 = u16::MAX; +const TCP_MAX_MTU: BatchSize = BatchSize::MAX; pub const TCP_LOCATOR_PREFIX: &str = "tcp"; @@ -52,7 +53,7 @@ impl LocatorInspector for TcpLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. 
- static ref TCP_DEFAULT_MTU: u16 = TCP_MAX_MTU; + static ref TCP_DEFAULT_MTU: BatchSize = TCP_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 7137ac0212..aaadcf3c23 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -25,6 +25,7 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use super::{ @@ -145,7 +146,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *TCP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 95d59104b4..7faebb4cd9 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -30,9 +30,12 @@ use std::{convert::TryFrom, net::SocketAddr}; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{ - endpoint::{self, Address}, - Locator, +use zenoh_protocol::{ + core::{ + endpoint::{self, Address}, + Locator, + }, + transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -45,7 +48,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TLS MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). 
-const TLS_MAX_MTU: u16 = u16::MAX; +const TLS_MAX_MTU: BatchSize = BatchSize::MAX; pub const TLS_LOCATOR_PREFIX: &str = "tls"; #[derive(Default, Clone, Copy)] @@ -172,7 +175,7 @@ impl ConfigurationInspector for TlsConfigurator { zconfigurable! { // Default MTU (TLS PDU) in bytes. - static ref TLS_DEFAULT_MTU: u16 = TLS_MAX_MTU; + static ref TLS_DEFAULT_MTU: BatchSize = TLS_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 7da711161e..a58e7372dd 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -42,8 +42,8 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{core::endpoint::Config, transport::BatchSize}; use zenoh_result::{bail, zerror, ZError, ZResult}; pub struct LinkUnicastTls { @@ -180,7 +180,7 @@ impl LinkUnicastTrait for LinkUnicastTls { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *TLS_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 91d02cc13d..86db845d8f 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -27,6 +27,7 @@ pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; // NOTE: In case of using UDP in high-throughput scenarios, it is 
recommended to set the @@ -44,24 +45,24 @@ use zenoh_result::{zerror, ZResult}; // Although in IPv6 it is possible to have UDP datagrams of size greater than 65,535 bytes via // IPv6 Jumbograms, its usage in Zenoh is discouraged unless the consequences are very well // understood. -const UDP_MAX_MTU: u16 = 65_507; +const UDP_MAX_MTU: BatchSize = 65_507; pub const UDP_LOCATOR_PREFIX: &str = "udp"; #[cfg(any(target_os = "linux", target_os = "windows"))] // Linux default value of a maximum datagram size is set to UDP MAX MTU. -const UDP_MTU_LIMIT: u16 = UDP_MAX_MTU; +const UDP_MTU_LIMIT: BatchSize = UDP_MAX_MTU; #[cfg(target_os = "macos")] // Mac OS X default value of a maximum datagram size is set to 9216 bytes. -const UDP_MTU_LIMIT: u16 = 9_216; +const UDP_MTU_LIMIT: BatchSize = 9_216; #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] -const UDP_MTU_LIMIT: u16 = 8_192; +const UDP_MTU_LIMIT: BatchSize = 8_192; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref UDP_DEFAULT_MTU: u16 = UDP_MTU_LIMIT; + static ref UDP_DEFAULT_MTU: BatchSize = UDP_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref UDP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index bc894bd296..a6e7977052 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -21,6 +21,7 @@ use std::{borrow::Cow, fmt}; use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; use zenoh_protocol::core::{Config, EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; pub struct LinkMulticastUdp { @@ -119,7 +120,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 1cd4a0b1ec..5021969bfa 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -30,6 +30,7 @@ use zenoh_link_commons::{ LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; @@ -200,7 +201,7 @@ impl LinkUnicastTrait for LinkUnicastUdp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 0a0aebe730..3026d4e4b0 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -33,6 +33,7 @@ use tokio::io::Interest; use tokio_util::sync::CancellationToken; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_protocol::core::{EndPoint, Locator}; +use 
zenoh_protocol::transport::BatchSize; use zenoh_runtime::ZRuntime; use unix_named_pipe::{create, open_write}; @@ -45,7 +46,7 @@ use zenoh_result::{bail, ZResult}; use super::FILE_ACCESS_MASK; -const LINUX_PIPE_MAX_MTU: u16 = 65_535; +const LINUX_PIPE_MAX_MTU: BatchSize = BatchSize::MAX; const LINUX_PIPE_DEDICATE_TRIES: usize = 100; static PIPE_INVITATION: &[u8] = &[0xDE, 0xAD, 0xBE, 0xEF]; @@ -498,7 +499,7 @@ impl LinkUnicastTrait for UnicastPipe { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { LINUX_PIPE_MAX_MTU } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs index b6c180cd8d..ce067c1aa2 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs @@ -21,6 +21,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; #[cfg(target_family = "unix")] mod unicast; @@ -33,13 +34,13 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the UNIXSOCKSTREAM MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const UNIXSOCKSTREAM_MAX_MTU: u16 = u16::MAX; +const UNIXSOCKSTREAM_MAX_MTU: BatchSize = BatchSize::MAX; pub const UNIXSOCKSTREAM_LOCATOR_PREFIX: &str = "unixsock-stream"; zconfigurable! { // Default MTU (UNIXSOCKSTREAM PDU) in bytes. - static ref UNIXSOCKSTREAM_DEFAULT_MTU: u16 = UNIXSOCKSTREAM_MAX_MTU; + static ref UNIXSOCKSTREAM_DEFAULT_MTU: BatchSize = UNIXSOCKSTREAM_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 53441ab89c..a961c1aebb 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -32,6 +32,7 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; @@ -119,7 +120,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UNIXSOCKSTREAM_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-vsock/src/lib.rs b/io/zenoh-links/zenoh-link-vsock/src/lib.rs index 7834050796..d58250fed3 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/lib.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/lib.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::Locator; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; #[cfg(target_os = "linux")] @@ -47,7 +47,7 @@ impl LocatorInspector for VsockLocatorInspector { zconfigurable! { // Default MTU in bytes. - static ref VSOCK_DEFAULT_MTU: u16 = u16::MAX; + static ref VSOCK_DEFAULT_MTU: BatchSize = BatchSize::MAX; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref VSOCK_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index ced7b9dc15..59efa6f0e3 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -27,8 +27,10 @@ use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::endpoint::Address; -use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{ + core::{endpoint::Address, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, ZResult}; use super::{VSOCK_ACCEPT_THROTTLE_TIME, VSOCK_DEFAULT_MTU, VSOCK_LOCATOR_PREFIX}; @@ -170,7 +172,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *VSOCK_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index f68a20d15d..d165b480a9 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -23,6 +23,7 @@ use url::Url; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, ZResult}; mod unicast; pub use unicast::*; @@ -33,7 +34,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const WS_MAX_MTU: u16 = u16::MAX; +const WS_MAX_MTU: BatchSize = BatchSize::MAX; pub const WS_LOCATOR_PREFIX: &str = "ws"; @@ -51,7 +52,7 @@ impl LocatorInspector for WsLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. 
- static ref WS_DEFAULT_MTU: u16 = WS_MAX_MTU; + static ref WS_DEFAULT_MTU: BatchSize = WS_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref TCP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 6a0cf64e6e..acf568f78c 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -34,6 +34,7 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, ZResult}; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; @@ -200,7 +201,7 @@ impl LinkUnicastTrait for LinkUnicastWs { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *WS_DEFAULT_MTU } diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index fb95d709db..b74fa2990c 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -22,7 +22,7 @@ use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use std::{ - sync::atomic::{AtomicBool, AtomicU16, Ordering}, + sync::atomic::{AtomicBool, Ordering}, time::Instant, }; use zenoh_buffers::{ @@ -40,7 +40,7 @@ use zenoh_protocol::{ transport::{ fragment::FragmentHeader, frame::{self, FrameHeader}, - BatchSize, TransportMessage, + AtomicBatchSize, BatchSize, TransportMessage, }, }; @@ -75,7 +75,7 @@ impl StageInRefill { struct StageInOut { n_out_w: Sender<()>, s_out_w: RingBufferWriter, - bytes: Arc, + bytes: Arc, backoff: Arc, } @@ -355,12 +355,12 @@ enum Pull { struct Backoff { retry_time: NanoSeconds, last_bytes: BatchSize, - bytes: Arc, + 
bytes: Arc, backoff: Arc, } impl Backoff { - fn new(bytes: Arc, backoff: Arc) -> Self { + fn new(bytes: Arc, backoff: Arc) -> Self { Self { retry_time: 0, last_bytes: 0, @@ -552,7 +552,7 @@ impl TransmissionPipeline { // This is a SPSC ring buffer let (s_out_w, s_out_r) = RingBuffer::::init(); let current = Arc::new(Mutex::new(None)); - let bytes = Arc::new(AtomicU16::new(0)); + let bytes = Arc::new(AtomicBatchSize::new(0)); let backoff = Arc::new(AtomicBool::new(false)); stage_in.push(Mutex::new(StageIn { diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index f16a68cfba..2d7961ed2b 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -93,7 +93,7 @@ pub struct TransportManagerConfig { pub zid: ZenohId, pub whatami: WhatAmI, pub resolution: Resolution, - pub batch_size: u16, + pub batch_size: BatchSize, pub wait_before_drop: Duration, pub queue_size: [usize; Priority::NUM], pub queue_backoff: Duration, @@ -122,7 +122,7 @@ pub struct TransportManagerBuilder { zid: ZenohId, whatami: WhatAmI, resolution: Resolution, - batch_size: u16, + batch_size: BatchSize, wait_before_drop: Duration, queue_size: QueueSizeConf, queue_backoff: Duration, @@ -151,7 +151,7 @@ impl TransportManagerBuilder { self } - pub fn batch_size(mut self, batch_size: u16) -> Self { + pub fn batch_size(mut self, batch_size: BatchSize) -> Self { self.batch_size = batch_size; self } diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 0db9e1c93a..6f0295601c 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -19,14 +19,17 @@ use zenoh_buffers::{ }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_crypto::{BlockCipher, PseudoRng}; -use zenoh_protocol::core::{Resolution, WhatAmI, ZenohId}; +use zenoh_protocol::{ + core::{Resolution, WhatAmI, ZenohId}, + transport::BatchSize, 
+}; #[derive(Debug, PartialEq)] pub(crate) struct Cookie { pub(crate) zid: ZenohId, pub(crate) whatami: WhatAmI, pub(crate) resolution: Resolution, - pub(crate) batch_size: u16, + pub(crate) batch_size: BatchSize, pub(crate) nonce: u64, // Extensions pub(crate) ext_qos: ext::qos::StateAccept, @@ -82,7 +85,7 @@ where let whatami = WhatAmI::try_from(wai).map_err(|_| DidntRead)?; let resolution: u8 = self.read(&mut *reader)?; let resolution = Resolution::from(resolution); - let batch_size: u16 = self.read(&mut *reader)?; + let batch_size: BatchSize = self.read(&mut *reader)?; let nonce: u64 = self.read(&mut *reader)?; // Extensions let ext_qos: ext::qos::StateAccept = self.read(&mut *reader)?; From 312c03a2a79e0d8a06904008331148efd2a5475a Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:57:52 +0100 Subject: [PATCH 083/598] Query.reply and reply_del, now accept TryIntoKeyExpr instead of IntoKeyExpr (#878) --- zenoh/src/queryable.rs | 41 ++++++++++++++++++++++------------------- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 5 +---- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 599c0e13be..58589bfe8f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -111,7 +111,7 @@ impl Query { #[inline(always)] #[cfg(feature = "unstable")] #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_> { + pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_, 'static> { let Sample { key_expr, payload, @@ -126,7 +126,7 @@ impl Query { } = sample; ReplyBuilder { query: self, - key_expr, + key_expr: Ok(key_expr), payload, kind, encoding, @@ -145,18 +145,19 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. 
#[inline(always)] - pub fn reply( + pub fn reply<'b, TryIntoKeyExpr, IntoPayload>( &self, - key_expr: IntoKeyExpr, + key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> ReplyBuilder<'_> + ) -> ReplyBuilder<'_, 'b> where - IntoKeyExpr: Into>, + TryIntoKeyExpr: TryInto>, + >>::Error: Into, IntoPayload: Into, { ReplyBuilder { query: self, - key_expr: key_expr.into(), + key_expr: key_expr.try_into().map_err(Into::into), payload: payload.into(), kind: SampleKind::Put, timestamp: None, @@ -187,13 +188,14 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyBuilder<'_> + pub fn reply_del<'b, TryIntoKeyExpr>(&self, key_expr: TryIntoKeyExpr) -> ReplyBuilder<'_, 'b> where - IntoKeyExpr: Into>, + TryIntoKeyExpr: TryInto>, + >>::Error: Into, { ReplyBuilder { query: self, - key_expr: key_expr.into(), + key_expr: key_expr.try_into().map_err(Into::into), payload: Payload::empty(), kind: SampleKind::Delete, timestamp: None, @@ -248,9 +250,9 @@ impl fmt::Display for Query { /// A builder returned by [`Query::reply()`](Query::reply) or [`Query::reply()`](Query::reply). 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyBuilder<'a> { +pub struct ReplyBuilder<'a, 'b> { query: &'a Query, - key_expr: KeyExpr<'static>, + key_expr: ZResult>, payload: Payload, kind: SampleKind, encoding: Encoding, @@ -270,7 +272,7 @@ pub struct ReplyErrBuilder<'a> { value: Value, } -impl<'a> ReplyBuilder<'a> { +impl<'a, 'b> ReplyBuilder<'a, 'b> { #[zenoh_macros::unstable] pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); @@ -292,16 +294,17 @@ impl<'a> ReplyBuilder<'a> { } } -impl<'a> Resolvable for ReplyBuilder<'a> { +impl<'a, 'b> Resolvable for ReplyBuilder<'a, 'b> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_> { +impl<'a, 'b> SyncResolve for ReplyBuilder<'a, 'b> { fn res_sync(self) -> ::To { + let key_expr = self.key_expr?; if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&self.key_expr) + && !self.query.key_expr().intersects(&key_expr) { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", self.key_expr, self.query.key_expr()) + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", &key_expr, self.query.key_expr()) } #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled let mut ext_sinfo = None; @@ -318,7 +321,7 @@ impl SyncResolve for ReplyBuilder<'_> { rid: self.query.inner.qid, wire_expr: WireExpr { scope: 0, - suffix: std::borrow::Cow::Owned(self.key_expr.into()), + suffix: std::borrow::Cow::Owned(key_expr.into()), mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { @@ -360,7 +363,7 @@ impl SyncResolve for ReplyBuilder<'_> { } } -impl<'a> AsyncResolve for ReplyBuilder<'a> { +impl<'a, 'b> 
AsyncResolve for ReplyBuilder<'a, 'b> { type Future = Ready; fn res_async(self) -> Self::Future { diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index c34d06690a..b90f0f568f 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -137,7 +137,7 @@ impl Task { tokio::select! { _ = token.cancelled() => break, query = queryable.recv_async() => { - query?.reply(KeyExpr::try_from(ke.to_owned())?, payload.clone()).res_async().await?; + query?.reply(ke.to_owned(), payload.clone()).res_async().await?; }, } } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 5e86499bc7..8c2d2e9937 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -164,10 +164,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re "ok_del" => { tokio::task::block_in_place(|| { tokio::runtime::Handle::current().block_on(async { - ztimeout!(query - .reply_del(KeyExpr::try_from(key_expr).unwrap()) - .res_async()) - .unwrap() + ztimeout!(query.reply_del(key_expr).res_async()).unwrap() }) }); } From 43a49379c0f126032f89505789d158b908c62ad6 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:18:28 +0100 Subject: [PATCH 084/598] SampleBuilder uses generics --- Cargo.lock | 50 ++--- zenoh/src/queryable.rs | 97 ++-------- zenoh/src/sample/builder.rs | 363 +++++++++++++++--------------------- 3 files changed, 189 insertions(+), 321 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3ea8978b5..9dff82ad80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.12" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -1103,9 +1103,9 @@ 
dependencies = [ [[package]] name = "env_logger" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" dependencies = [ "anstream", "anstyle", @@ -1122,9 +1122,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.3.31" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" +checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" dependencies = [ "serde", ] @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -1854,9 +1854,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" dependencies = [ "serde", "value-bag", @@ -2865,9 +2865,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" dependencies = [ "log", "ring 0.17.6", @@ -2923,9 +2923,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.1" +version = "1.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" [[package]] name = "rustls-webpki" @@ -3701,9 +3701,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -3743,7 +3743,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.2", + "rustls 0.22.3", "rustls-pki-types", "tokio", ] @@ -4030,9 +4030,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" +checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -4040,9 +4040,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0b9f3feef403a50d4d67e9741a6d8fc688bcbb4e4f31bd4aab72cc690284394" +checksum = "cc35703541cbccb5278ef7b589d79439fc808ff0b5867195a3230f9a47421d39" dependencies = [ "erased-serde", "serde", @@ -4051,9 +4051,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d" +checksum = "285b43c29d0b4c0e65aad24561baee67a1b69dc9be9375d4a85138cbf556f7f8" 
dependencies = [ "sval", "sval_buffer", @@ -4676,7 +4676,7 @@ dependencies = [ "flume", "futures", "log", - "rustls 0.22.2", + "rustls 0.22.3", "rustls-webpki 0.102.2", "serde", "tokio", @@ -4763,7 +4763,7 @@ dependencies = [ "base64 0.21.4", "futures", "log", - "rustls 0.22.2", + "rustls 0.22.3", "rustls-pemfile 2.0.0", "rustls-pki-types", "rustls-webpki 0.102.2", diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 5df0d73d44..0e977f3def 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,8 +19,8 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::{ - DeleteSampleBuilder, PutSampleBuilder, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, - TimestampBuilderTrait, ValueBuilderTrait, + OpDelete, OpPut, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + ValueBuilderTrait, }; use crate::sample::SourceInfo; use crate::Id; @@ -115,10 +115,10 @@ impl Query { #[inline(always)] #[cfg(feature = "unstable")] #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplySampleBuilder<'_> { - ReplySampleBuilder { + pub fn reply_sample(&self, sample: Sample) -> ReplySample<'_> { + ReplySample { query: self, - sample_builder: sample.into(), + sample, } } @@ -168,7 +168,7 @@ impl Query { IntoKeyExpr: Into>, { let sample_builder = - DeleteSampleBuilder::new(key_expr).with_qos(response::ext::QoSType::RESPONSE.into()); + SampleBuilder::delete(key_expr).qos(response::ext::QoSType::RESPONSE.into()); ReplyDelBuilder { query: self, sample_builder, @@ -214,91 +214,22 @@ impl fmt::Display for Query { } } -pub struct ReplySampleBuilder<'a> { +pub struct ReplySample<'a> { query: &'a Query, - sample_builder: SampleBuilder, + sample: Sample, } -impl<'a> ReplySampleBuilder<'a> { - pub fn put(self, payload: IntoPayload) -> ReplyBuilder<'a> - where - IntoPayload: Into, - { - let builder = ReplyBuilder { - query: self.query, - 
sample_builder: self.sample_builder.into(), - }; - builder.payload(payload) - } - pub fn delete(self) -> ReplyDelBuilder<'a> { - ReplyDelBuilder { - query: self.query, - sample_builder: self.sample_builder.into(), - } - } -} - -impl TimestampBuilderTrait for ReplySampleBuilder<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - sample_builder: self.sample_builder.timestamp(timestamp), - ..self - } - } -} - -impl SampleBuilderTrait for ReplySampleBuilder<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.source_info(source_info), - ..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - sample_builder: self.sample_builder.attachment(attachment), - ..self - } - } -} - -impl QoSBuilderTrait for ReplySampleBuilder<'_> { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - sample_builder: self.sample_builder.congestion_control(congestion_control), - ..self - } - } - - fn priority(self, priority: Priority) -> Self { - Self { - sample_builder: self.sample_builder.priority(priority), - ..self - } - } - - fn express(self, is_express: bool) -> Self { - Self { - sample_builder: self.sample_builder.express(is_express), - ..self - } - } -} - -impl Resolvable for ReplySampleBuilder<'_> { +impl Resolvable for ReplySample<'_> { type To = ZResult<()>; } -impl SyncResolve for ReplySampleBuilder<'_> { +impl SyncResolve for ReplySample<'_> { fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) + self.query._reply_sample(self.sample) } } -impl AsyncResolve for ReplySampleBuilder<'_> { +impl AsyncResolve for ReplySample<'_> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -311,7 +242,7 @@ impl AsyncResolve for ReplySampleBuilder<'_> { #[derive(Debug)] pub struct ReplyBuilder<'a> { query: &'a Query, - sample_builder: PutSampleBuilder, + sample_builder: 
SampleBuilder, } impl TimestampBuilderTrait for ReplyBuilder<'_> { @@ -392,7 +323,7 @@ impl ValueBuilderTrait for ReplyBuilder<'_> { #[derive(Debug)] pub struct ReplyDelBuilder<'a> { query: &'a Query, - sample_builder: DeleteSampleBuilder, + sample_builder: SampleBuilder, } impl TimestampBuilderTrait for ReplyDelBuilder<'_> { diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 920bd2b7b7..cae58514ff 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -12,6 +12,8 @@ // ZettaScale Zenoh Team, // +use std::marker::PhantomData; + use crate::sample::Attachment; use crate::sample::QoS; use crate::sample::QoSBuilder; @@ -63,290 +65,225 @@ pub trait ValueBuilderTrait { } #[derive(Debug)] -pub struct SampleBuilder(Sample); +pub struct OpPut; +#[derive(Debug)] +pub struct OpDelete; +#[derive(Debug)] +pub struct OpAny; -impl SampleBuilder { +#[derive(Debug)] +pub struct SampleBuilder { + sample: Sample, + _t: PhantomData, +} + +impl SampleBuilder { pub fn put( key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> PutSampleBuilder + ) -> SampleBuilder where IntoKeyExpr: Into>, IntoPayload: Into, { - PutSampleBuilder::new(key_expr, payload) + Self { + sample: Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }, + _t: PhantomData::, + } } - pub fn delete(key_expr: IntoKeyExpr) -> DeleteSampleBuilder +} + +impl SampleBuilder { + pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder where IntoKeyExpr: Into>, { - DeleteSampleBuilder::new(key_expr) + Self { + sample: Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::Delete, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + 
#[cfg(feature = "unstable")] + attachment: None, + }, + _t: PhantomData::, + } } +} + +impl SampleBuilder { /// Allows to change keyexpr of [`Sample`] pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { - Self(Sample { - key_expr: key_expr.into(), - ..self.0 - }) + Self { + sample: Sample { + key_expr: key_expr.into(), + ..self.sample + }, + _t: PhantomData::, + } } } -impl TimestampBuilderTrait for SampleBuilder { - fn timestamp>>(self, timestamp: T) -> Self { - Self(Sample { - timestamp: timestamp.into(), - ..self.0 - }) +impl TimestampBuilderTrait for SampleBuilder { + fn timestamp>>(self, timestamp: U) -> Self { + Self { + sample: Sample { + timestamp: timestamp.into(), + ..self.sample + }, + _t: PhantomData::, + } } } -impl SampleBuilderTrait for SampleBuilder { +impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { - Self(Sample { - source_info, - ..self.0 - }) + Self { + sample: Sample { + source_info, + ..self.sample + }, + _t: PhantomData::, + } } #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self { - Self(Sample { - attachment: attachment.into(), - ..self.0 - }) - } -} - -impl QoSBuilderTrait for SampleBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.congestion_control(congestion_control).into(); - Self(Sample { qos, ..self.0 }) - } - fn priority(self, priority: Priority) -> Self { - let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.priority(priority).into(); - Self(Sample { qos, ..self.0 }) - } - fn express(self, is_express: bool) -> Self { - let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.express(is_express).into(); - Self(Sample { qos, ..self.0 }) - } -} - -#[derive(Debug)] -pub struct PutSampleBuilder(SampleBuilder); - -impl From for PutSampleBuilder { - fn from(sample_builder: SampleBuilder) -> Self { - 
Self(SampleBuilder(Sample { - kind: SampleKind::Put, - ..sample_builder.0 - })) - } -} - -impl PutSampleBuilder { - fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Self(SampleBuilder::from(Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - })) - } - /// Allows to change keyexpr of [`Sample`] - pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.keyexpr(key_expr)) - } - // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. - pub(crate) fn qos(self, qos: QoS) -> Self { - Self(SampleBuilder(Sample { qos, ..self.0 .0 })) - } -} - -impl TimestampBuilderTrait for PutSampleBuilder { - fn timestamp>>(self, timestamp: T) -> Self { - Self(self.0.timestamp(timestamp)) + fn attachment>>(self, attachment: U) -> Self { + Self { + sample: Sample { + attachment: attachment.into(), + ..self.sample + }, + _t: PhantomData::, + } } } -impl SampleBuilderTrait for PutSampleBuilder { - #[zenoh_macros::unstable] - fn source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.source_info(source_info)) - } - #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self { - Self(self.0.attachment(attachment)) +impl SampleBuilder { + pub fn qos(self, qos: QoS) -> Self { + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } } -impl QoSBuilderTrait for PutSampleBuilder { +impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self(self.0.congestion_control(congestion_control)) + let qos: QoSBuilder = self.sample.qos.into(); + let qos = 
qos.congestion_control(congestion_control).into(); + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } fn priority(self, priority: Priority) -> Self { - Self(self.0.priority(priority)) + let qos: QoSBuilder = self.sample.qos.into(); + let qos = qos.priority(priority).into(); + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + let qos: QoSBuilder = self.sample.qos.into(); + let qos = qos.express(is_express).into(); + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } } -impl ValueBuilderTrait for PutSampleBuilder { +impl ValueBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { - Self(SampleBuilder(Sample { - encoding: encoding.into(), - ..self.0 .0 - })) + Self { + sample: Sample { + encoding: encoding.into(), + ..self.sample + }, + _t: PhantomData::, + } } fn payload>(self, payload: T) -> Self { - Self(SampleBuilder(Sample { - payload: payload.into(), - ..self.0 .0 - })) + Self { + sample: Sample { + payload: payload.into(), + ..self.sample + }, + _t: PhantomData::, + } } fn value>(self, value: T) -> Self { let Value { payload, encoding } = value.into(); - Self(SampleBuilder(Sample { - payload, - encoding, - ..self.0 .0 - })) - } -} - -#[derive(Debug)] -pub struct DeleteSampleBuilder(SampleBuilder); - -impl From for DeleteSampleBuilder { - fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder(Sample { - kind: SampleKind::Delete, - ..sample_builder.0 - })) - } -} - -impl DeleteSampleBuilder { - pub fn new(key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(SampleBuilder::from(Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: 
None, - })) - } - /// Allows to change keyexpr of [`Sample`] - pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.keyexpr(key_expr)) - } - // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. - pub(crate) fn with_qos(self, qos: QoS) -> Self { - Self(SampleBuilder(Sample { qos, ..self.0 .0 })) - } -} - -impl TimestampBuilderTrait for DeleteSampleBuilder { - fn timestamp>>(self, timestamp: T) -> Self { - Self(self.0.timestamp(timestamp)) - } -} - -impl SampleBuilderTrait for DeleteSampleBuilder { - #[zenoh_macros::unstable] - fn source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.source_info(source_info)) - } - #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self { - Self(self.0.attachment(attachment)) - } -} - -impl QoSBuilderTrait for DeleteSampleBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self(self.0.congestion_control(congestion_control)) - } - fn priority(self, priority: Priority) -> Self { - Self(self.0.priority(priority)) - } - fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + Self { + sample: Sample { + payload, + encoding, + ..self.sample + }, + _t: PhantomData::, + } } } -impl From for SampleBuilder { +impl From for SampleBuilder { fn from(sample: Sample) -> Self { - SampleBuilder(sample) + SampleBuilder { + sample, + _t: PhantomData::, + } } } -impl TryFrom for PutSampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Put { bail!("Sample is not a put sample") } - Ok(Self(SampleBuilder(sample))) + Ok(SampleBuilder { + sample, + _t: PhantomData::, + }) } } -impl TryFrom for DeleteSampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != 
SampleKind::Delete { bail!("Sample is not a delete sample") } - Ok(Self(SampleBuilder(sample))) - } -} - -impl From for Sample { - fn from(sample_builder: SampleBuilder) -> Self { - sample_builder.0 - } -} - -impl From for Sample { - fn from(put_sample_builder: PutSampleBuilder) -> Self { - put_sample_builder.0 .0 + Ok(SampleBuilder { + sample, + _t: PhantomData::, + }) } } -impl From for Sample { - fn from(delete_sample_builder: DeleteSampleBuilder) -> Self { - delete_sample_builder.0 .0 +impl From> for Sample { + fn from(sample_builder: SampleBuilder) -> Self { + sample_builder.sample } } From 6c305a130043a66ee58f3985eb4f71eb708ff5dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:35:40 +0100 Subject: [PATCH 085/598] Improve Query builders with generics --- zenoh/src/queryable.rs | 178 +++++++++++++----------------------- zenoh/src/sample/builder.rs | 65 ++++++------- 2 files changed, 99 insertions(+), 144 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0e977f3def..fea148e6e6 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,7 +19,7 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::{ - OpDelete, OpPut, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + op, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; use crate::sample::SourceInfo; @@ -132,18 +132,19 @@ impl Query { &self, key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> ReplyBuilder<'_> + ) -> ReplySampleBuilder<'_, op::Put> where IntoKeyExpr: Into>, IntoPayload: Into, { let sample_builder = SampleBuilder::put(key_expr, payload).qos(response::ext::QoSType::RESPONSE.into()); - ReplyBuilder { + ReplySampleBuilder { query: self, sample_builder, } } + /// Sends a error reply to this Query. 
/// #[inline(always)] @@ -163,13 +164,16 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyDelBuilder<'_> + pub fn reply_del( + &self, + key_expr: IntoKeyExpr, + ) -> ReplySampleBuilder<'_, op::Delete> where IntoKeyExpr: Into>, { let sample_builder = SampleBuilder::delete(key_expr).qos(response::ext::QoSType::RESPONSE.into()); - ReplyDelBuilder { + ReplySampleBuilder { query: self, sample_builder, } @@ -240,13 +244,13 @@ impl AsyncResolve for ReplySample<'_> { /// A builder returned by [`Query::reply()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyBuilder<'a> { +pub struct ReplySampleBuilder<'a, T> { query: &'a Query, - sample_builder: SampleBuilder, + sample_builder: SampleBuilder, } -impl TimestampBuilderTrait for ReplyBuilder<'_> { - fn timestamp>>(self, timestamp: T) -> Self { +impl TimestampBuilderTrait for ReplySampleBuilder<'_, T> { + fn timestamp>>(self, timestamp: U) -> Self { Self { sample_builder: self.sample_builder.timestamp(timestamp), ..self @@ -254,7 +258,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_> { } } -impl SampleBuilderTrait for ReplyBuilder<'_> { +impl SampleBuilderTrait for ReplySampleBuilder<'_, T> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -264,7 +268,7 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { + fn attachment>>(self, attachment: U) -> Self { Self { sample_builder: self.sample_builder.attachment(attachment), ..self @@ -272,7 +276,7 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } } -impl QoSBuilderTrait for ReplyBuilder<'_> 
{ +impl QoSBuilderTrait for ReplySampleBuilder<'_, T> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), @@ -295,7 +299,7 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } } -impl ValueBuilderTrait for ReplyBuilder<'_> { +impl ValueBuilderTrait for ReplySampleBuilder<'_, op::Put> { fn encoding>(self, encoding: T) -> Self { Self { sample_builder: self.sample_builder.encoding(encoding), @@ -318,101 +322,86 @@ impl ValueBuilderTrait for ReplyBuilder<'_> { } } -/// A builder returned by [`Query::reply_del()`](Query::reply) -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct ReplyDelBuilder<'a> { - query: &'a Query, - sample_builder: SampleBuilder, +impl<'a, T> Resolvable for ReplySampleBuilder<'a, T> { + type To = ZResult<()>; } -impl TimestampBuilderTrait for ReplyDelBuilder<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - sample_builder: self.sample_builder.timestamp(timestamp), - ..self - } +impl SyncResolve for ReplySampleBuilder<'_, T> { + fn res_sync(self) -> ::To { + self.query._reply_sample(self.sample_builder.into()) } } -impl SampleBuilderTrait for ReplyDelBuilder<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.source_info(source_info), - ..self - } - } +impl<'a, T> AsyncResolve for ReplySampleBuilder<'a, T> { + type Future = Ready; - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - sample_builder: self.sample_builder.attachment(attachment), - ..self - } + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) } } -impl QoSBuilderTrait for ReplyDelBuilder<'_> { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { +/// A builder returned by 
[`Query::reply_err()`](Query::reply_err). +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, +} + +impl ValueBuilderTrait for ReplyErrBuilder<'_> { + fn encoding>(self, encoding: T) -> Self { Self { - sample_builder: self.sample_builder.congestion_control(congestion_control), + value: self.value.encoding(encoding), ..self } } - fn priority(self, priority: Priority) -> Self { + fn payload>(self, payload: T) -> Self { Self { - sample_builder: self.sample_builder.priority(priority), + value: self.value.payload(payload), ..self } } - fn express(self, is_express: bool) -> Self { + fn value>(self, value: T) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + value: value.into(), ..self } } } -/// A builder returned by [`Query::reply_err()`](Query::reply_err). -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct ReplyErrBuilder<'a> { - query: &'a Query, - value: Value, -} - -impl<'a> Resolvable for ReplyBuilder<'a> { - type To = ZResult<()>; -} - -impl SyncResolve for ReplyBuilder<'_> { - fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) - } -} - -impl<'a> Resolvable for ReplyDelBuilder<'a> { +impl<'a> Resolvable for ReplyErrBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyDelBuilder<'_> { +impl SyncResolve for ReplyErrBuilder<'_> { fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) - } -} - -impl<'a> AsyncResolve for ReplyBuilder<'a> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: 
std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Err(zenoh::Err { + encoding: self.value.encoding.into(), + ext_sinfo: None, + ext_unknown: vec![], + payload: self.value.payload.into(), + }), + ext_qos: response::ext::QoSType::RESPONSE, + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) } } -impl<'a> AsyncResolve for ReplyDelBuilder<'a> { +impl<'a> AsyncResolve for ReplyErrBuilder<'a> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -477,43 +466,6 @@ impl Query { } } -impl<'a> Resolvable for ReplyErrBuilder<'a> { - type To = ZResult<()>; -} - -impl SyncResolve for ReplyErrBuilder<'_> { - fn res_sync(self) -> ::To { - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Err(zenoh::Err { - encoding: self.value.encoding.into(), - ext_sinfo: None, - ext_unknown: vec![], - payload: self.value.payload.into(), - }), - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } -} -impl<'a> AsyncResolve for ReplyErrBuilder<'a> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - pub(crate) struct QueryableState { pub(crate) id: Id, pub(crate) key_expr: WireExpr<'static>, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index cae58514ff..1ec20209aa 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -64,12 +64,16 @@ pub trait ValueBuilderTrait { fn value>(self, value: T) -> Self; } -#[derive(Debug)] -pub struct OpPut; -#[derive(Debug)] -pub struct OpDelete; 
-#[derive(Debug)] -pub struct OpAny; +pub mod op { + #[derive(Debug)] + pub struct Put; + #[derive(Debug)] + pub struct Delete; + #[derive(Debug)] + pub struct Error; + #[derive(Debug)] + pub struct Any; +} #[derive(Debug)] pub struct SampleBuilder { @@ -77,11 +81,11 @@ pub struct SampleBuilder { _t: PhantomData, } -impl SampleBuilder { +impl SampleBuilder { pub fn put( key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> SampleBuilder + ) -> SampleBuilder where IntoKeyExpr: Into>, IntoPayload: Into, @@ -99,13 +103,13 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl SampleBuilder { - pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder +impl SampleBuilder { + pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder where IntoKeyExpr: Into>, { @@ -122,7 +126,7 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } @@ -141,6 +145,14 @@ impl SampleBuilder { _t: PhantomData::, } } + + // Allows to change qos as a whole of [`Sample`] + pub fn qos(self, qos: QoS) -> Self { + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } + } } impl TimestampBuilderTrait for SampleBuilder { @@ -179,15 +191,6 @@ impl SampleBuilderTrait for SampleBuilder { } } -impl SampleBuilder { - pub fn qos(self, qos: QoS) -> Self { - Self { - sample: Sample { qos, ..self.sample }, - _t: PhantomData::, - } - } -} - impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { let qos: QoSBuilder = self.sample.qos.into(); @@ -215,14 +218,14 @@ impl QoSBuilderTrait for SampleBuilder { } } -impl ValueBuilderTrait for SampleBuilder { +impl ValueBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { Self { sample: Sample { encoding: encoding.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn payload>(self, payload: T) -> Self { @@ -231,7 +234,7 
@@ impl ValueBuilderTrait for SampleBuilder { payload: payload.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn value>(self, value: T) -> Self { @@ -242,21 +245,21 @@ impl ValueBuilderTrait for SampleBuilder { encoding, ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl From for SampleBuilder { +impl From for SampleBuilder { fn from(sample: Sample) -> Self { SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, } } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Put { @@ -264,12 +267,12 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Delete { @@ -277,7 +280,7 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } From bca953da3de684228241cbd1c8bc8641945b2b84 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:36:58 +0100 Subject: [PATCH 086/598] Reorg sample files --- zenoh/src/sample.rs | 655 -------------------------------------------- 1 file changed, 655 deletions(-) delete mode 100644 zenoh/src/sample.rs diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs deleted file mode 100644 index 2b71105d5e..0000000000 --- a/zenoh/src/sample.rs +++ /dev/null @@ -1,655 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Sample primitives -use crate::encoding::Encoding; -use crate::payload::Payload; -use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; -use crate::time::Timestamp; -use crate::Priority; -#[zenoh_macros::unstable] -use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::ext::QoSType; -use zenoh_protocol::{core::CongestionControl, zenoh}; - -pub mod builder; - -pub type SourceSn = u64; - -/// The locality of samples to be received by subscribers or targeted by publishers. -#[zenoh_macros::unstable] -#[derive(Clone, Copy, Debug, Default, Serialize, PartialEq, Eq)] -pub enum Locality { - SessionLocal, - Remote, - #[default] - Any, -} -#[cfg(not(feature = "unstable"))] -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] -pub(crate) enum Locality { - SessionLocal, - Remote, - #[default] - Any, -} - -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub(crate) struct DataInfo { - pub kind: SampleKind, - pub encoding: Option, - pub timestamp: Option, - pub source_id: Option, - pub source_sn: Option, - pub qos: QoS, -} - -pub(crate) trait DataInfoIntoSample { - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into; -} - -impl DataInfoIntoSample for DataInfo { - // This function is for internal use only. - // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) - // The test for it is intentionally not added to avoid inserting extra "if" into hot path. - // The correctness of the data should be ensured by the caller. 
- #[inline] - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: self.kind, - encoding: self.encoding.unwrap_or_default(), - timestamp: self.timestamp, - qos: self.qos, - #[cfg(feature = "unstable")] - source_info: SourceInfo { - source_id: self.source_id, - source_sn: self.source_sn, - }, - #[cfg(feature = "unstable")] - attachment, - } - } -} - -impl DataInfoIntoSample for Option { - #[inline] - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - if let Some(data_info) = self { - data_info.into_sample(key_expr, payload, attachment) - } else { - Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment, - } - } - } -} - -/// Informations on the source of a zenoh [`Sample`]. -#[zenoh_macros::unstable] -#[derive(Debug, Clone)] -pub struct SourceInfo { - /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. - pub source_id: Option, - /// The sequence number of the [`Sample`] from the source. 
- pub source_sn: Option, -} - -#[test] -#[cfg(feature = "unstable")] -fn source_info_stack_size() { - use crate::{ - sample::{SourceInfo, SourceSn}, - ZenohId, - }; - - assert_eq!(std::mem::size_of::(), 16); - assert_eq!(std::mem::size_of::>(), 17); - assert_eq!(std::mem::size_of::>(), 16); - assert_eq!(std::mem::size_of::(), 17 + 16 + 7); -} - -#[zenoh_macros::unstable] -impl SourceInfo { - pub(crate) fn empty() -> Self { - SourceInfo { - source_id: None, - source_sn: None, - } - } - pub(crate) fn is_empty(&self) -> bool { - self.source_id.is_none() && self.source_sn.is_none() - } -} - -impl From for Option { - fn from(source_info: SourceInfo) -> Option { - if source_info.is_empty() { - None - } else { - Some(zenoh::put::ext::SourceInfoType { - id: source_info.source_id.unwrap_or_default(), - sn: source_info.source_sn.unwrap_or_default() as u32, - }) - } - } -} - -#[zenoh_macros::unstable] -impl From for SourceInfo { - fn from(data_info: DataInfo) -> Self { - SourceInfo { - source_id: data_info.source_id, - source_sn: data_info.source_sn, - } - } -} - -#[zenoh_macros::unstable] -impl From> for SourceInfo { - fn from(data_info: Option) -> Self { - match data_info { - Some(data_info) => data_info.into(), - None => SourceInfo::empty(), - } - } -} - -mod attachment { - #[zenoh_macros::unstable] - use zenoh_buffers::{ - reader::{HasReader, Reader}, - writer::HasWriter, - ZBuf, ZBufReader, ZSlice, - }; - #[zenoh_macros::unstable] - use zenoh_codec::{RCodec, WCodec, Zenoh080}; - #[zenoh_macros::unstable] - use zenoh_protocol::zenoh::ext::AttachmentType; - - /// A builder for [`Attachment`] - #[zenoh_macros::unstable] - #[derive(Debug)] - pub struct AttachmentBuilder { - pub(crate) inner: Vec, - } - #[zenoh_macros::unstable] - impl Default for AttachmentBuilder { - fn default() -> Self { - Self::new() - } - } - #[zenoh_macros::unstable] - impl AttachmentBuilder { - pub fn new() -> Self { - Self { inner: Vec::new() } - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { 
- let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - pub fn build(self) -> Attachment { - Attachment { - inner: self.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Attachment { - fn from(value: AttachmentBuilder) -> Self { - Attachment { - inner: value.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Option { - fn from(value: AttachmentBuilder) -> Self { - if value.inner.is_empty() { - None - } else { - Some(value.into()) - } - } - } - - #[zenoh_macros::unstable] - #[derive(Clone)] - pub struct Attachment { - pub(crate) inner: ZBuf, - } - #[zenoh_macros::unstable] - impl Default for Attachment { - fn default() -> Self { - Self::new() - } - } - #[zenoh_macros::unstable] - impl From for AttachmentType { - fn from(this: Attachment) -> Self { - AttachmentType { buffer: this.inner } - } - } - #[zenoh_macros::unstable] - impl From> for Attachment { - fn from(this: AttachmentType) -> Self { - Attachment { inner: this.buffer } - } - } - #[zenoh_macros::unstable] - impl Attachment { - pub fn new() -> Self { - Self { - inner: ZBuf::empty(), - } - } - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - pub fn len(&self) -> usize { - self.iter().count() - } - pub fn iter(&self) -> AttachmentIterator { - self.into_iter() - } - fn _get(&self, key: &[u8]) -> Option { - self.iter() - .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) - } - pub fn get>(&self, key: &Key) -> Option { - 
self._get(key.as_ref()) - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. - /// - /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - fn _extend(&mut self, with: Self) -> &mut Self { - for slice in with.inner.zslices().cloned() { - self.inner.push_zslice(slice); - } - self - } - pub fn extend(&mut self, with: impl Into) -> &mut Self { - let with = with.into(); - self._extend(with) - } - } - #[zenoh_macros::unstable] - pub struct AttachmentIterator<'a> { - reader: ZBufReader<'a>, - } - #[zenoh_macros::unstable] - impl<'a> core::iter::IntoIterator for &'a Attachment { - type Item = (ZSlice, ZSlice); - type IntoIter = AttachmentIterator<'a>; - fn into_iter(self) -> Self::IntoIter { - AttachmentIterator { - reader: self.inner.reader(), - } - } - } - #[zenoh_macros::unstable] - impl core::fmt::Debug for Attachment { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{{")?; - for (key, value) in self { - let key = key.as_slice(); - let value = value.as_slice(); - match core::str::from_utf8(key) { - Ok(key) => write!(f, "\"{key}\": ")?, - Err(_) => { - write!(f, "0x")?; - for byte in key { - write!(f, "{byte:02X}")? 
- } - } - } - match core::str::from_utf8(value) { - Ok(value) => write!(f, "\"{value}\", ")?, - Err(_) => { - write!(f, "0x")?; - for byte in value { - write!(f, "{byte:02X}")? - } - write!(f, ", ")? - } - } - } - write!(f, "}}") - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::Iterator for AttachmentIterator<'a> { - type Item = (ZSlice, ZSlice); - fn next(&mut self) -> Option { - let key = Zenoh080.read(&mut self.reader).ok()?; - let value = Zenoh080.read(&mut self.reader).ok()?; - Some((key, value)) - } - fn size_hint(&self) -> (usize, Option) { - ( - (self.reader.remaining() != 0) as usize, - Some(self.reader.remaining() / 2), - ) - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080; - let mut buffer: Vec = Vec::new(); - let mut writer = buffer.writer(); - for (key, value) in iter { - codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures - codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures - } - Self { inner: buffer } - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { - fn from_iter>(iter: T) -> Self { - AttachmentBuilder::from_iter(iter).into() - } - } -} - -/// The kind of a `Sample`. -#[repr(u8)] -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -pub enum SampleKind { - /// if the `Sample` was issued by a `put` operation. - #[default] - Put = 0, - /// if the `Sample` was issued by a `delete` operation. 
- Delete = 1, -} - -impl fmt::Display for SampleKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SampleKind::Put => write!(f, "PUT"), - SampleKind::Delete => write!(f, "DELETE"), - } - } -} - -impl TryFrom for SampleKind { - type Error = u64; - fn try_from(kind: u64) -> Result { - match kind { - 0 => Ok(SampleKind::Put), - 1 => Ok(SampleKind::Delete), - _ => Err(kind), - } - } -} - -#[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; - -/// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. -pub struct SampleFields { - pub key_expr: KeyExpr<'static>, - pub payload: Payload, - pub kind: SampleKind, - pub encoding: Encoding, - pub timestamp: Option, - pub express: bool, - pub priority: Priority, - pub congestion_control: CongestionControl, - #[cfg(feature = "unstable")] - pub source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub attachment: Option, -} - -impl From for SampleFields { - fn from(sample: Sample) -> Self { - SampleFields { - key_expr: sample.key_expr, - payload: sample.payload, - kind: sample.kind, - encoding: sample.encoding, - timestamp: sample.timestamp, - express: sample.qos.express(), - priority: sample.qos.priority(), - congestion_control: sample.qos.congestion_control(), - #[cfg(feature = "unstable")] - source_info: sample.source_info, - #[cfg(feature = "unstable")] - attachment: sample.attachment, - } - } -} - -/// A zenoh sample. 
-#[non_exhaustive] -#[derive(Clone, Debug)] -pub struct Sample { - pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, - pub(crate) kind: SampleKind, - pub(crate) encoding: Encoding, - pub(crate) timestamp: Option, - pub(crate) qos: QoS, - - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -impl Sample { - /// Gets the key expression on which this Sample was published. - #[inline] - pub fn key_expr(&self) -> &KeyExpr<'static> { - &self.key_expr - } - - /// Gets the payload of this Sample. - #[inline] - pub fn payload(&self) -> &Payload { - &self.payload - } - - /// Gets the kind of this Sample. - #[inline] - pub fn kind(&self) -> SampleKind { - self.kind - } - - /// Gets the encoding of this sample - #[inline] - pub fn encoding(&self) -> &Encoding { - &self.encoding - } - - /// Gets the timestamp of this Sample. - #[inline] - pub fn timestamp(&self) -> Option<&Timestamp> { - self.timestamp.as_ref() - } - - /// Gets the quality of service settings this Sample was sent with. - #[inline] - pub fn qos(&self) -> &QoS { - &self.qos - } - - /// Gets infos on the source of this Sample. - #[zenoh_macros::unstable] - #[inline] - pub fn source_info(&self) -> &SourceInfo { - &self.source_info - } - - /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
- #[zenoh_macros::unstable] - #[inline] - pub fn attachment(&self) -> Option<&Attachment> { - self.attachment.as_ref() - } -} - -impl From for Value { - fn from(sample: Sample) -> Self { - Value::new(sample.payload).encoding(sample.encoding) - } -} - -/// Structure containing quality of service data -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct QoS { - inner: QoSType, -} - -#[derive(Debug)] -pub struct QoSBuilder(QoS); - -impl From for QoSBuilder { - fn from(qos: QoS) -> Self { - QoSBuilder(qos) - } -} - -impl From for QoS { - fn from(builder: QoSBuilder) -> Self { - builder.0 - } -} - -impl QoSBuilderTrait for QoSBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let mut inner = self.0.inner; - inner.set_congestion_control(congestion_control); - Self(QoS { inner }) - } - - fn priority(self, priority: Priority) -> Self { - let mut inner = self.0.inner; - inner.set_priority(priority.into()); - Self(QoS { inner }) - } - - fn express(self, is_express: bool) -> Self { - let mut inner = self.0.inner; - inner.set_is_express(is_express); - Self(QoS { inner }) - } -} - -impl QoS { - /// Gets priority of the message. - pub fn priority(&self) -> Priority { - match Priority::try_from(self.inner.get_priority()) { - Ok(p) => p, - Err(e) => { - log::trace!( - "Failed to convert priority: {}; replacing with default value", - e.to_string() - ); - Priority::default() - } - } - } - - /// Gets congestion control of the message. - pub fn congestion_control(&self) -> CongestionControl { - self.inner.get_congestion_control() - } - - /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. 
- pub fn express(&self) -> bool { - self.inner.is_express() - } -} - -impl From for QoS { - fn from(qos: QoSType) -> Self { - QoS { inner: qos } - } -} - -impl From for QoSType { - fn from(qos: QoS) -> Self { - qos.inner - } -} From 9d1a5409541831926e70420fdf89006a67b1020c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:37:23 +0100 Subject: [PATCH 087/598] Remove error op struct in SampleBuilder --- zenoh/src/sample/builder.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 1ec20209aa..7f438d1381 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -70,8 +70,6 @@ pub mod op { #[derive(Debug)] pub struct Delete; #[derive(Debug)] - pub struct Error; - #[derive(Debug)] pub struct Any; } From 7904d099ba3d069ecc51b76241ef136678a5e005 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 20:29:43 +0100 Subject: [PATCH 088/598] Add forgotten file --- zenoh/src/sample/mod.rs | 655 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 655 insertions(+) create mode 100644 zenoh/src/sample/mod.rs diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs new file mode 100644 index 0000000000..2b71105d5e --- /dev/null +++ b/zenoh/src/sample/mod.rs @@ -0,0 +1,655 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! 
Sample primitives +use crate::encoding::Encoding; +use crate::payload::Payload; +use crate::prelude::{KeyExpr, Value}; +use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::time::Timestamp; +use crate::Priority; +#[zenoh_macros::unstable] +use serde::Serialize; +use std::{convert::TryFrom, fmt}; +use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::network::declare::ext::QoSType; +use zenoh_protocol::{core::CongestionControl, zenoh}; + +pub mod builder; + +pub type SourceSn = u64; + +/// The locality of samples to be received by subscribers or targeted by publishers. +#[zenoh_macros::unstable] +#[derive(Clone, Copy, Debug, Default, Serialize, PartialEq, Eq)] +pub enum Locality { + SessionLocal, + Remote, + #[default] + Any, +} +#[cfg(not(feature = "unstable"))] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub(crate) enum Locality { + SessionLocal, + Remote, + #[default] + Any, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub(crate) struct DataInfo { + pub kind: SampleKind, + pub encoding: Option, + pub timestamp: Option, + pub source_id: Option, + pub source_sn: Option, + pub qos: QoS, +} + +pub(crate) trait DataInfoIntoSample { + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into; +} + +impl DataInfoIntoSample for DataInfo { + // This function is for internal use only. + // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) + // The test for it is intentionally not added to avoid inserting extra "if" into hot path. + // The correctness of the data should be ensured by the caller. 
+ #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: self.kind, + encoding: self.encoding.unwrap_or_default(), + timestamp: self.timestamp, + qos: self.qos, + #[cfg(feature = "unstable")] + source_info: SourceInfo { + source_id: self.source_id, + source_sn: self.source_sn, + }, + #[cfg(feature = "unstable")] + attachment, + } + } +} + +impl DataInfoIntoSample for Option { + #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + if let Some(data_info) = self { + data_info.into_sample(key_expr, payload, attachment) + } else { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment, + } + } + } +} + +/// Informations on the source of a zenoh [`Sample`]. +#[zenoh_macros::unstable] +#[derive(Debug, Clone)] +pub struct SourceInfo { + /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. + pub source_id: Option, + /// The sequence number of the [`Sample`] from the source. 
+ pub source_sn: Option, +} + +#[test] +#[cfg(feature = "unstable")] +fn source_info_stack_size() { + use crate::{ + sample::{SourceInfo, SourceSn}, + ZenohId, + }; + + assert_eq!(std::mem::size_of::(), 16); + assert_eq!(std::mem::size_of::>(), 17); + assert_eq!(std::mem::size_of::>(), 16); + assert_eq!(std::mem::size_of::(), 17 + 16 + 7); +} + +#[zenoh_macros::unstable] +impl SourceInfo { + pub(crate) fn empty() -> Self { + SourceInfo { + source_id: None, + source_sn: None, + } + } + pub(crate) fn is_empty(&self) -> bool { + self.source_id.is_none() && self.source_sn.is_none() + } +} + +impl From for Option { + fn from(source_info: SourceInfo) -> Option { + if source_info.is_empty() { + None + } else { + Some(zenoh::put::ext::SourceInfoType { + id: source_info.source_id.unwrap_or_default(), + sn: source_info.source_sn.unwrap_or_default() as u32, + }) + } + } +} + +#[zenoh_macros::unstable] +impl From for SourceInfo { + fn from(data_info: DataInfo) -> Self { + SourceInfo { + source_id: data_info.source_id, + source_sn: data_info.source_sn, + } + } +} + +#[zenoh_macros::unstable] +impl From> for SourceInfo { + fn from(data_info: Option) -> Self { + match data_info { + Some(data_info) => data_info.into(), + None => SourceInfo::empty(), + } + } +} + +mod attachment { + #[zenoh_macros::unstable] + use zenoh_buffers::{ + reader::{HasReader, Reader}, + writer::HasWriter, + ZBuf, ZBufReader, ZSlice, + }; + #[zenoh_macros::unstable] + use zenoh_codec::{RCodec, WCodec, Zenoh080}; + #[zenoh_macros::unstable] + use zenoh_protocol::zenoh::ext::AttachmentType; + + /// A builder for [`Attachment`] + #[zenoh_macros::unstable] + #[derive(Debug)] + pub struct AttachmentBuilder { + pub(crate) inner: Vec, + } + #[zenoh_macros::unstable] + impl Default for AttachmentBuilder { + fn default() -> Self { + Self::new() + } + } + #[zenoh_macros::unstable] + impl AttachmentBuilder { + pub fn new() -> Self { + Self { inner: Vec::new() } + } + fn _insert(&mut self, key: &[u8], value: &[u8]) { 
+ let codec = Zenoh080; + let mut writer = self.inner.writer(); + codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure + codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure + } + /// Inserts a key-value pair to the attachment. + /// + /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. + pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( + &mut self, + key: &Key, + value: &Value, + ) { + self._insert(key.as_ref(), value.as_ref()) + } + pub fn build(self) -> Attachment { + Attachment { + inner: self.inner.into(), + } + } + } + #[zenoh_macros::unstable] + impl From for Attachment { + fn from(value: AttachmentBuilder) -> Self { + Attachment { + inner: value.inner.into(), + } + } + } + #[zenoh_macros::unstable] + impl From for Option { + fn from(value: AttachmentBuilder) -> Self { + if value.inner.is_empty() { + None + } else { + Some(value.into()) + } + } + } + + #[zenoh_macros::unstable] + #[derive(Clone)] + pub struct Attachment { + pub(crate) inner: ZBuf, + } + #[zenoh_macros::unstable] + impl Default for Attachment { + fn default() -> Self { + Self::new() + } + } + #[zenoh_macros::unstable] + impl From for AttachmentType { + fn from(this: Attachment) -> Self { + AttachmentType { buffer: this.inner } + } + } + #[zenoh_macros::unstable] + impl From> for Attachment { + fn from(this: AttachmentType) -> Self { + Attachment { inner: this.buffer } + } + } + #[zenoh_macros::unstable] + impl Attachment { + pub fn new() -> Self { + Self { + inner: ZBuf::empty(), + } + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn len(&self) -> usize { + self.iter().count() + } + pub fn iter(&self) -> AttachmentIterator { + self.into_iter() + } + fn _get(&self, key: &[u8]) -> Option { + self.iter() + .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) + } + pub fn get>(&self, key: &Key) -> Option { + 
self._get(key.as_ref()) + } + fn _insert(&mut self, key: &[u8], value: &[u8]) { + let codec = Zenoh080; + let mut writer = self.inner.writer(); + codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure + codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure + } + /// Inserts a key-value pair to the attachment. + /// + /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. + /// + /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] + pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( + &mut self, + key: &Key, + value: &Value, + ) { + self._insert(key.as_ref(), value.as_ref()) + } + fn _extend(&mut self, with: Self) -> &mut Self { + for slice in with.inner.zslices().cloned() { + self.inner.push_zslice(slice); + } + self + } + pub fn extend(&mut self, with: impl Into) -> &mut Self { + let with = with.into(); + self._extend(with) + } + } + #[zenoh_macros::unstable] + pub struct AttachmentIterator<'a> { + reader: ZBufReader<'a>, + } + #[zenoh_macros::unstable] + impl<'a> core::iter::IntoIterator for &'a Attachment { + type Item = (ZSlice, ZSlice); + type IntoIter = AttachmentIterator<'a>; + fn into_iter(self) -> Self::IntoIter { + AttachmentIterator { + reader: self.inner.reader(), + } + } + } + #[zenoh_macros::unstable] + impl core::fmt::Debug for Attachment { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{{")?; + for (key, value) in self { + let key = key.as_slice(); + let value = value.as_slice(); + match core::str::from_utf8(key) { + Ok(key) => write!(f, "\"{key}\": ")?, + Err(_) => { + write!(f, "0x")?; + for byte in key { + write!(f, "{byte:02X}")? 
+ } + } + } + match core::str::from_utf8(value) { + Ok(value) => write!(f, "\"{value}\", ")?, + Err(_) => { + write!(f, "0x")?; + for byte in value { + write!(f, "{byte:02X}")? + } + write!(f, ", ")? + } + } + } + write!(f, "}}") + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::Iterator for AttachmentIterator<'a> { + type Item = (ZSlice, ZSlice); + fn next(&mut self) -> Option { + let key = Zenoh080.read(&mut self.reader).ok()?; + let value = Zenoh080.read(&mut self.reader).ok()?; + Some((key, value)) + } + fn size_hint(&self) -> (usize, Option) { + ( + (self.reader.remaining() != 0) as usize, + Some(self.reader.remaining() / 2), + ) + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { + fn from_iter>(iter: T) -> Self { + let codec = Zenoh080; + let mut buffer: Vec = Vec::new(); + let mut writer = buffer.writer(); + for (key, value) in iter { + codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures + codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures + } + Self { inner: buffer } + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { + fn from_iter>(iter: T) -> Self { + AttachmentBuilder::from_iter(iter).into() + } + } +} + +/// The kind of a `Sample`. +#[repr(u8)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] +pub enum SampleKind { + /// if the `Sample` was issued by a `put` operation. + #[default] + Put = 0, + /// if the `Sample` was issued by a `delete` operation. 
+ Delete = 1, +} + +impl fmt::Display for SampleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SampleKind::Put => write!(f, "PUT"), + SampleKind::Delete => write!(f, "DELETE"), + } + } +} + +impl TryFrom for SampleKind { + type Error = u64; + fn try_from(kind: u64) -> Result { + match kind { + 0 => Ok(SampleKind::Put), + 1 => Ok(SampleKind::Delete), + _ => Err(kind), + } + } +} + +#[zenoh_macros::unstable] +pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; + +/// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. +pub struct SampleFields { + pub key_expr: KeyExpr<'static>, + pub payload: Payload, + pub kind: SampleKind, + pub encoding: Encoding, + pub timestamp: Option, + pub express: bool, + pub priority: Priority, + pub congestion_control: CongestionControl, + #[cfg(feature = "unstable")] + pub source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub attachment: Option, +} + +impl From for SampleFields { + fn from(sample: Sample) -> Self { + SampleFields { + key_expr: sample.key_expr, + payload: sample.payload, + kind: sample.kind, + encoding: sample.encoding, + timestamp: sample.timestamp, + express: sample.qos.express(), + priority: sample.qos.priority(), + congestion_control: sample.qos.congestion_control(), + #[cfg(feature = "unstable")] + source_info: sample.source_info, + #[cfg(feature = "unstable")] + attachment: sample.attachment, + } + } +} + +/// A zenoh sample. 
+#[non_exhaustive] +#[derive(Clone, Debug)] +pub struct Sample { + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) payload: Payload, + pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + pub(crate) qos: QoS, + + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl Sample { + /// Gets the key expression on which this Sample was published. + #[inline] + pub fn key_expr(&self) -> &KeyExpr<'static> { + &self.key_expr + } + + /// Gets the payload of this Sample. + #[inline] + pub fn payload(&self) -> &Payload { + &self.payload + } + + /// Gets the kind of this Sample. + #[inline] + pub fn kind(&self) -> SampleKind { + self.kind + } + + /// Gets the encoding of this sample + #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } + + /// Gets the timestamp of this Sample. + #[inline] + pub fn timestamp(&self) -> Option<&Timestamp> { + self.timestamp.as_ref() + } + + /// Gets the quality of service settings this Sample was sent with. + #[inline] + pub fn qos(&self) -> &QoS { + &self.qos + } + + /// Gets infos on the source of this Sample. + #[zenoh_macros::unstable] + #[inline] + pub fn source_info(&self) -> &SourceInfo { + &self.source_info + } + + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
+ #[zenoh_macros::unstable] + #[inline] + pub fn attachment(&self) -> Option<&Attachment> { + self.attachment.as_ref() + } +} + +impl From for Value { + fn from(sample: Sample) -> Self { + Value::new(sample.payload).encoding(sample.encoding) + } +} + +/// Structure containing quality of service data +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +pub struct QoS { + inner: QoSType, +} + +#[derive(Debug)] +pub struct QoSBuilder(QoS); + +impl From for QoSBuilder { + fn from(qos: QoS) -> Self { + QoSBuilder(qos) + } +} + +impl From for QoS { + fn from(builder: QoSBuilder) -> Self { + builder.0 + } +} + +impl QoSBuilderTrait for QoSBuilder { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let mut inner = self.0.inner; + inner.set_congestion_control(congestion_control); + Self(QoS { inner }) + } + + fn priority(self, priority: Priority) -> Self { + let mut inner = self.0.inner; + inner.set_priority(priority.into()); + Self(QoS { inner }) + } + + fn express(self, is_express: bool) -> Self { + let mut inner = self.0.inner; + inner.set_is_express(is_express); + Self(QoS { inner }) + } +} + +impl QoS { + /// Gets priority of the message. + pub fn priority(&self) -> Priority { + match Priority::try_from(self.inner.get_priority()) { + Ok(p) => p, + Err(e) => { + log::trace!( + "Failed to convert priority: {}; replacing with default value", + e.to_string() + ); + Priority::default() + } + } + } + + /// Gets congestion control of the message. + pub fn congestion_control(&self) -> CongestionControl { + self.inner.get_congestion_control() + } + + /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. 
+ pub fn express(&self) -> bool { + self.inner.is_express() + } +} + +impl From for QoS { + fn from(qos: QoSType) -> Self { + QoS { inner: qos } + } +} + +impl From for QoSType { + fn from(qos: QoS) -> Self { + qos.inner + } +} From ab349b2e91ee2fce1b0776526f6bb26af26a3b76 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 11:46:29 +0200 Subject: [PATCH 089/598] support of TryIntoKeyexpr --- zenoh/src/key_expr.rs | 2 +- zenoh/src/queryable.rs | 144 ++++++++++++++++++++++++++++------------ zenoh/src/sample/mod.rs | 6 ++ 3 files changed, 107 insertions(+), 45 deletions(-) diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index aaa1d13724..d2bfb5bcfe 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -185,7 +185,7 @@ impl<'a> KeyExpr<'a> { /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. - pub unsafe fn from_str_uncheckend(s: &'a str) -> Self { + pub unsafe fn from_str_unchecked(s: &'a str) -> Self { keyexpr::from_str_unchecked(s).into() } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index c2a5557440..37c3a2303a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,10 +19,9 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::{ - op, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, - ValueBuilderTrait, + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; -use crate::sample::SourceInfo; +use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -132,17 +131,23 @@ impl Query { &self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> ReplySampleBuilder<'_, op::Put> + ) -> ReplyBuilder<'_, 'b, ReplyBuilderPut> where TryIntoKeyExpr: TryInto>, >>::Error: Into, IntoPayload: Into, 
{ - let sample_builder = - SampleBuilder::put(key_expr, payload).qos(response::ext::QoSType::RESPONSE.into()); - ReplySampleBuilder { + ReplyBuilder { query: self, - sample_builder, + key_expr: key_expr.try_into().map_err(Into::into), + qos: response::ext::QoSType::RESPONSE.into(), + kind: ReplyBuilderPut { + payload: payload.into(), + encoding: Encoding::default(), + }, + timestamp: None, + source_info: SourceInfo::empty(), + attachment: None, } } @@ -165,19 +170,22 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del( + pub fn reply_del<'b, TryIntoKeyExpr>( &self, - key_expr: IntoKeyExpr, - ) -> ReplySampleBuilder<'_, op::Delete> + key_expr: TryIntoKeyExpr, + ) -> ReplyBuilder<'_, 'b, ReplyBuilderDelete> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - let sample_builder = - SampleBuilder::delete(key_expr).qos(response::ext::QoSType::RESPONSE.into()); - ReplySampleBuilder { + ReplyBuilder { query: self, - sample_builder, + key_expr: key_expr.try_into().map_err(Into::into), + qos: response::ext::QoSType::RESPONSE.into(), + kind: ReplyBuilderDelete, + timestamp: None, + source_info: SourceInfo::empty(), + attachment: None, } } @@ -243,28 +251,45 @@ impl AsyncResolve for ReplySample<'_> { } } -/// A builder returned by [`Query::reply()`](Query::reply) +#[derive(Debug)] +pub struct ReplyBuilderPut { + payload: super::Payload, + encoding: super::Encoding, +} +#[derive(Debug)] +pub struct ReplyBuilderDelete; + +/// A builder returned by [`Query::reply()`](Query::reply) and [`Query::reply_del()`](Query::reply_del) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplySampleBuilder<'a, T> { +pub struct ReplyBuilder<'a, 'b, T> { query: &'a Query, - sample_builder: 
SampleBuilder, + key_expr: ZResult>, + kind: T, + timestamp: Option, + qos: QoSBuilder, + + #[cfg(feature = "unstable")] + source_info: SourceInfo, + + #[cfg(feature = "unstable")] + attachment: Option, } -impl TimestampBuilderTrait for ReplySampleBuilder<'_, T> { +impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { fn timestamp>>(self, timestamp: U) -> Self { Self { - sample_builder: self.sample_builder.timestamp(timestamp), + timestamp: timestamp.into(), ..self } } } -impl SampleBuilderTrait for ReplySampleBuilder<'_, T> { +impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.source_info(source_info), + source_info, ..self } } @@ -272,69 +297,100 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { Self { - sample_builder: self.sample_builder.attachment(attachment), + attachment: attachment.into(), ..self } } } -impl QoSBuilderTrait for ReplySampleBuilder<'_, T> { +impl QoSBuilderTrait for ReplyBuilder<'_, '_, T> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - sample_builder: self.sample_builder.congestion_control(congestion_control), - ..self - } + let qos = self.qos.congestion_control(congestion_control); + Self { qos, ..self } } fn priority(self, priority: Priority) -> Self { - Self { - sample_builder: self.sample_builder.priority(priority), - ..self - } + let qos = self.qos.priority(priority); + Self { qos, ..self } } fn express(self, is_express: bool) -> Self { - Self { - sample_builder: self.sample_builder.express(is_express), - ..self - } + let qos = self.qos.express(is_express); + Self { qos, ..self } } } -impl ValueBuilderTrait for ReplySampleBuilder<'_, op::Put> { +impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { fn encoding>(self, encoding: T) -> Self { Self { - sample_builder: 
self.sample_builder.encoding(encoding), + kind: ReplyBuilderPut { + encoding: encoding.into(), + ..self.kind + }, ..self } } fn payload>(self, payload: T) -> Self { Self { - sample_builder: self.sample_builder.payload(payload), + kind: ReplyBuilderPut { + payload: payload.into(), + ..self.kind + }, ..self } } fn value>(self, value: T) -> Self { let Value { payload, encoding } = value.into(); Self { - sample_builder: self.sample_builder.payload(payload).encoding(encoding), + kind: ReplyBuilderPut { payload, encoding }, ..self } } } -impl<'a, T> Resolvable for ReplySampleBuilder<'a, T> { +impl Resolvable for ReplyBuilder<'_, '_, T> { type To = ZResult<()>; } -impl SyncResolve for ReplySampleBuilder<'_, T> { +impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { + fn res_sync(self) -> ::To { + let key_expr = self.key_expr?.into_owned(); + let sample = SampleBuilder::put(key_expr, self.kind.payload) + .encoding(self.kind.encoding) + .timestamp(self.timestamp) + .qos(self.qos.into()); + #[cfg(feature = "unstable")] + let sample = sample.source_info(self.source_info); + #[cfg(feature = "unstable")] + let sample = sample.attachment(self.attachment); + self.query._reply_sample(sample.into()) + } +} + +impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) + let key_expr = self.key_expr?.into_owned(); + let sample = SampleBuilder::delete(key_expr) + .timestamp(self.timestamp) + .qos(self.qos.into()); + #[cfg(feature = "unstable")] + let sample = sample.source_info(self.source_info); + #[cfg(feature = "unstable")] + let sample = sample.attachment(self.attachment); + self.query._reply_sample(sample.into()) + } +} + +impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) } } -impl<'a, T> AsyncResolve for ReplySampleBuilder<'a, T> { +impl AsyncResolve for ReplyBuilder<'_, 
'_, ReplyBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 2b71105d5e..be80f8277e 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -590,6 +590,12 @@ impl From for QoSBuilder { } } +impl From for QoSBuilder { + fn from(qos: QoSType) -> Self { + QoSBuilder(QoS { inner: qos }) + } +} + impl From for QoS { fn from(builder: QoSBuilder) -> Self { builder.0 From e4c4be1d4c5dd5b02ed539a57eba324c6e5b2a07 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 11:53:44 +0200 Subject: [PATCH 090/598] removed "op" namespace to align naming with ReplyBuilder --- zenoh/src/sample/builder.rs | 46 ++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 7f438d1381..fd697e942a 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -64,14 +64,12 @@ pub trait ValueBuilderTrait { fn value>(self, value: T) -> Self; } -pub mod op { - #[derive(Debug)] - pub struct Put; - #[derive(Debug)] - pub struct Delete; - #[derive(Debug)] - pub struct Any; -} +#[derive(Debug)] +pub struct SampleBuilderPut; +#[derive(Debug)] +pub struct SampleBuilderDelete; +#[derive(Debug)] +pub struct SampleBuilderAny; #[derive(Debug)] pub struct SampleBuilder { @@ -79,11 +77,11 @@ pub struct SampleBuilder { _t: PhantomData, } -impl SampleBuilder { +impl SampleBuilder { pub fn put( key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> SampleBuilder + ) -> SampleBuilder where IntoKeyExpr: Into>, IntoPayload: Into, @@ -101,13 +99,13 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl SampleBuilder { - pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder +impl SampleBuilder { + pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder where IntoKeyExpr: Into>, { @@ -124,7 +122,7 @@ impl 
SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } @@ -216,14 +214,14 @@ impl QoSBuilderTrait for SampleBuilder { } } -impl ValueBuilderTrait for SampleBuilder { +impl ValueBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { Self { sample: Sample { encoding: encoding.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn payload>(self, payload: T) -> Self { @@ -232,7 +230,7 @@ impl ValueBuilderTrait for SampleBuilder { payload: payload.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn value>(self, value: T) -> Self { @@ -243,21 +241,21 @@ impl ValueBuilderTrait for SampleBuilder { encoding, ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl From for SampleBuilder { +impl From for SampleBuilder { fn from(sample: Sample) -> Self { SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, } } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Put { @@ -265,12 +263,12 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Delete { @@ -278,7 +276,7 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } From d631f761620b377cd9460f275c4f6deeef61e996 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 13:25:59 +0200 Subject: [PATCH 091/598] publication builder shortened --- zenoh/src/publication.rs | 145 ++++++++---------------------------- zenoh/src/sample/builder.rs | 8 +- zenoh/src/session.rs | 15 ++-- 3 files changed, 46 insertions(+), 122 deletions(-) diff --git a/zenoh/src/publication.rs 
b/zenoh/src/publication.rs index 1d62375cdd..69715a0867 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -41,33 +41,15 @@ use zenoh_result::ZResult; /// The kind of congestion control. pub use zenoh_protocol::core::CongestionControl; -/// A builder for initializing a [`delete`](crate::Session::delete) operation. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// session -/// .delete("key/expression") -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -pub struct DeleteBuilder<'a, 'b> { - pub(crate) publisher: PublisherBuilder<'a, 'b>, - pub(crate) timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, +#[derive(Debug, Clone)] +pub struct PublicationBuilderPut { + pub(crate) payload: Payload, + pub(crate) encoding: Encoding, } +#[derive(Debug, Clone)] +pub struct PublicationBuilderDelete; -/// A builder for initializing a [`put`](crate::Session::put) operation. 
+/// A builder for initializing a [`put`](crate::Session::put) and [`delete`](crate::Session::delete) operations /// /// # Examples /// ``` @@ -89,10 +71,9 @@ pub struct DeleteBuilder<'a, 'b> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug, Clone)] -pub struct PutBuilder<'a, 'b> { +pub struct PublicationBuilder<'a, 'b, T> { pub(crate) publisher: PublisherBuilder<'a, 'b>, - pub(crate) payload: Payload, - pub(crate) encoding: Encoding, + pub(crate) kind: T, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, @@ -100,7 +81,7 @@ pub struct PutBuilder<'a, 'b> { pub(crate) attachment: Option, } -impl QoSBuilderTrait for PutBuilder<'_, '_> { +impl QoSBuilderTrait for PublicationBuilder<'_, '_, T> { #[inline] fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { @@ -124,58 +105,8 @@ impl QoSBuilderTrait for PutBuilder<'_, '_> { } } -impl QoSBuilderTrait for DeleteBuilder<'_, '_> { - #[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - publisher: self.publisher.congestion_control(congestion_control), - ..self - } - } - #[inline] - fn priority(self, priority: Priority) -> Self { - Self { - publisher: self.publisher.priority(priority), - ..self - } - } - #[inline] - fn express(self, is_express: bool) -> Self { - Self { - publisher: self.publisher.express(is_express), - ..self - } - } -} - -impl TimestampBuilderTrait for PutBuilder<'_, '_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for PutBuilder<'_, '_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self 
- } - } -} - -impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { - fn timestamp>>(self, timestamp: T) -> Self { +impl TimestampBuilderTrait for PublicationBuilder<'_, '_, T> { + fn timestamp>>(self, timestamp: TS) -> Self { Self { timestamp: timestamp.into(), ..self @@ -183,7 +114,7 @@ impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { } } -impl SampleBuilderTrait for DeleteBuilder<'_, '_> { +impl SampleBuilderTrait for PublicationBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -192,7 +123,7 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { + fn attachment>>(self, attachment: TA) -> Self { Self { attachment: attachment.into(), ..self @@ -200,10 +131,13 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } } -impl ValueBuilderTrait for PutBuilder<'_, '_> { +impl ValueBuilderTrait for PublicationBuilder<'_, '_, PublicationBuilderPut> { fn encoding>(self, encoding: T) -> Self { Self { - encoding: encoding.into(), + kind: PublicationBuilderPut { + encoding: encoding.into(), + ..self.kind + }, ..self } } @@ -213,32 +147,23 @@ impl ValueBuilderTrait for PutBuilder<'_, '_> { IntoPayload: Into, { Self { - payload: payload.into(), + kind: PublicationBuilderPut { + payload: payload.into(), + ..self.kind + }, ..self } } fn value>(self, value: T) -> Self { let Value { payload, encoding } = value.into(); Self { - payload, - encoding, + kind: PublicationBuilderPut { payload, encoding }, ..self } } } -impl PutBuilder<'_, '_> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
- #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self - } -} - -impl DeleteBuilder<'_, '_> { +impl PublicationBuilder<'_, '_, T> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -249,23 +174,19 @@ impl DeleteBuilder<'_, '_> { } } -impl Resolvable for PutBuilder<'_, '_> { - type To = ZResult<()>; -} - -impl Resolvable for DeleteBuilder<'_, '_> { +impl Resolvable for PublicationBuilder<'_, '_, T> { type To = ZResult<()>; } -impl SyncResolve for PutBuilder<'_, '_> { +impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, - self.payload, + self.kind.payload, SampleKind::Put, - self.encoding, + self.kind.encoding, self.timestamp, #[cfg(feature = "unstable")] self.source_info, @@ -275,7 +196,7 @@ impl SyncResolve for PutBuilder<'_, '_> { } } -impl SyncResolve for DeleteBuilder<'_, '_> { +impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; @@ -293,7 +214,7 @@ impl SyncResolve for DeleteBuilder<'_, '_> { } } -impl AsyncResolve for PutBuilder<'_, '_> { +impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -301,7 +222,7 @@ impl AsyncResolve for PutBuilder<'_, '_> { } } -impl AsyncResolve for DeleteBuilder<'_, '_> { +impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -1038,7 +959,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } - // internal function for `PutBuilder` and `DeleteBuilder` 
+ // internal function for perfroming the publication fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index fd697e942a..295451abc1 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -64,14 +64,14 @@ pub trait ValueBuilderTrait { fn value>(self, value: T) -> Self; } -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilderPut; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilderDelete; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilderAny; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilder { sample: Sample, _t: PhantomData, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 4d71f58ffa..b1b059d163 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -705,17 +705,19 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> PutBuilder<'a, 'b> + ) -> PublicationBuilder<'a, 'b, PublicationBuilderPut> where TryIntoKeyExpr: TryInto>, >>::Error: Into, IntoPayload: Into, { - PutBuilder { + PublicationBuilder { publisher: self.declare_publisher(key_expr), - payload: payload.into(), + kind: PublicationBuilderPut { + payload: payload.into(), + encoding: Encoding::default(), + }, timestamp: None, - encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] @@ -743,13 +745,14 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> DeleteBuilder<'a, 'b> + ) -> PublicationBuilder<'a, 'b, PublicationBuilderDelete> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - DeleteBuilder { + PublicationBuilder { publisher: self.declare_publisher(key_expr), + kind: PublicationBuilderDelete, timestamp: None, #[cfg(feature = "unstable")] attachment: None, From 9b8aaa69d190547a65084cb5c0be605aa706b67f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 
Apr 2024 13:45:01 +0200 Subject: [PATCH 092/598] parametrized publication builder --- zenoh/src/publication.rs | 24 ++++++++++++------------ zenoh/src/session.rs | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 69715a0867..dd1818d842 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -71,8 +71,8 @@ pub struct PublicationBuilderDelete; /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug, Clone)] -pub struct PublicationBuilder<'a, 'b, T> { - pub(crate) publisher: PublisherBuilder<'a, 'b>, +pub struct PublicationBuilder { + pub(crate) publisher: P, pub(crate) kind: T, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] @@ -81,7 +81,7 @@ pub struct PublicationBuilder<'a, 'b, T> { pub(crate) attachment: Option, } -impl QoSBuilderTrait for PublicationBuilder<'_, '_, T> { +impl QoSBuilderTrait for PublicationBuilder, T> { #[inline] fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { @@ -105,7 +105,7 @@ impl QoSBuilderTrait for PublicationBuilder<'_, '_, T> { } } -impl TimestampBuilderTrait for PublicationBuilder<'_, '_, T> { +impl TimestampBuilderTrait for PublicationBuilder { fn timestamp>>(self, timestamp: TS) -> Self { Self { timestamp: timestamp.into(), @@ -114,7 +114,7 @@ impl TimestampBuilderTrait for PublicationBuilder<'_, '_, T> { } } -impl SampleBuilderTrait for PublicationBuilder<'_, '_, T> { +impl SampleBuilderTrait for PublicationBuilder { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -131,7 +131,7 @@ impl SampleBuilderTrait for PublicationBuilder<'_, '_, T> { } } -impl ValueBuilderTrait for PublicationBuilder<'_, '_, PublicationBuilderPut> { +impl

ValueBuilderTrait for PublicationBuilder { fn encoding>(self, encoding: T) -> Self { Self { kind: PublicationBuilderPut { @@ -163,7 +163,7 @@ impl ValueBuilderTrait for PublicationBuilder<'_, '_, PublicationBuilderPut> { } } -impl PublicationBuilder<'_, '_, T> { +impl PublicationBuilder, T> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -174,11 +174,11 @@ impl PublicationBuilder<'_, '_, T> { } } -impl Resolvable for PublicationBuilder<'_, '_, T> { +impl Resolvable for PublicationBuilder { type To = ZResult<()>; } -impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { +impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; @@ -196,7 +196,7 @@ impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { } } -impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { +impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; @@ -214,7 +214,7 @@ impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { } } -impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { +impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -222,7 +222,7 @@ impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { } } -impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { +impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index b1b059d163..63cc0bb7fa 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -705,7 
+705,7 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> PublicationBuilder<'a, 'b, PublicationBuilderPut> + ) -> PublicationBuilder, PublicationBuilderPut> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -745,7 +745,7 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> PublicationBuilder<'a, 'b, PublicationBuilderDelete> + ) -> PublicationBuilder, PublicationBuilderDelete> where TryIntoKeyExpr: TryInto>, >>::Error: Into, From bbe07f78294418e5f1d2aa95499987f827e3510c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 14:03:02 +0200 Subject: [PATCH 093/598] removed PutPublication, DeletePublication --- zenoh/src/publication.rs | 144 +++++---------------------------------- 1 file changed, 18 insertions(+), 126 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index dd1818d842..41e2b0fa04 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -408,14 +408,19 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> PutPublication + pub fn put( + &self, + payload: IntoPayload, + ) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> where IntoPayload: Into, { - PutPublication { + PublicationBuilder { publisher: self, - payload: payload.into(), - encoding: Encoding::ZENOH_BYTES, + kind: PublicationBuilderPut { + payload: payload.into(), + encoding: Encoding::ZENOH_BYTES, + }, timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), @@ -437,9 +442,10 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # } /// ``` - pub fn delete(&self) -> DeletePublication { - DeletePublication { + pub fn delete(&self) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + PublicationBuilder { publisher: self, + kind: PublicationBuilderDelete, timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), @@ 
-674,127 +680,13 @@ impl Drop for Publisher<'_> { } } -/// A [`Resolvable`] returned by [`Publisher::put()`](Publisher::put), -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct PutPublication<'a> { - publisher: &'a Publisher<'a>, - payload: Payload, - encoding: Encoding, - timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -/// A [`Resolvable`] returned by [`Publisher::delete()`](Publisher::delete) -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct DeletePublication<'a> { - publisher: &'a Publisher<'a>, - timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -impl TimestampBuilderTrait for PutPublication<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for PutPublication<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl ValueBuilderTrait for PutPublication<'_> { - fn encoding>(self, encoding: T) -> Self { - Self { - encoding: encoding.into(), - ..self - } - } - - fn payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - Self { - payload: payload.into(), - ..self - } - } - - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - payload, - encoding, - ..self - } - } -} - -impl TimestampBuilderTrait for DeletePublication<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - timestamp: 
timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for DeletePublication<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl Resolvable for PutPublication<'_> { - type To = ZResult<()>; -} - -impl Resolvable for DeletePublication<'_> { - type To = ZResult<()>; -} - -impl SyncResolve for PutPublication<'_> { +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { fn res_sync(self) -> ::To { resolve_put( self.publisher, - self.payload, + self.kind.payload, SampleKind::Put, - self.encoding, + self.kind.encoding, self.timestamp, #[cfg(feature = "unstable")] self.source_info, @@ -804,7 +696,7 @@ impl SyncResolve for PutPublication<'_> { } } -impl SyncResolve for DeletePublication<'_> { +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { fn res_sync(self) -> ::To { resolve_put( self.publisher, @@ -820,7 +712,7 @@ impl SyncResolve for DeletePublication<'_> { } } -impl AsyncResolve for PutPublication<'_> { +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -828,7 +720,7 @@ impl AsyncResolve for PutPublication<'_> { } } -impl AsyncResolve for DeletePublication<'_> { +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { From 4d0f6e52d07c9c0208430b454f8982044f2e0409 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 15:32:30 +0200 Subject: [PATCH 094/598] removed extra uses --- examples/examples/z_ping.rs | 1 - examples/examples/z_pong.rs | 1 - examples/examples/z_pub.rs | 1 - examples/examples/z_pub_shm_thr.rs | 1 - examples/examples/z_pub_thr.rs | 1 - 
.../zenoh-plugin-rest/examples/z_serve_sse.rs | 1 - .../src/replica/align_queryable.rs | 2 - .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 53 +++----- zenoh-ext/src/group.rs | 1 - zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/publication.rs | 65 +++++---- zenoh/src/queryable.rs | 126 +++++++++--------- zenoh/tests/session.rs | 1 - zenoh/tests/unicity.rs | 1 - 15 files changed, 115 insertions(+), 144 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 59bcaddadc..a57c937e48 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -16,7 +16,6 @@ use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index e0fa079629..baa5683f62 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -15,7 +15,6 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index b6a1ddc0d8..8cd3c4edba 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -15,7 +15,6 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::SampleBuilderTrait; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 86429e8ab7..c8a33f98fa 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -15,7 +15,6 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use 
zenoh::sample::builder::QoSBuilderTrait; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 78d54111a8..4354ad2e68 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,7 +16,6 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c353826fab..bb76005d6e 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,6 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 729572601c..1ce6a1cb16 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,6 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::TimestampBuilderTrait; -use zenoh::sample::builder::ValueBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 5121f0b445..64d5cfa1cd 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use 
zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index feebfb588a..06c5882408 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,14 +23,13 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; -use zenoh::{Result as ZResult, Session, SessionDeclarations}; +use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; @@ -296,31 +295,25 @@ impl StorageService { ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = match self + let sample_to_store: Sample = if let Some(update) = self .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { - Some(Update { - kind: SampleKind::Put, - data, - }) => { - let Value { - payload, encoding, .. 
- } = data.value; - SampleBuilder::put(KeyExpr::from(k.clone()), payload) - .encoding(encoding) - .timestamp(data.timestamp) - .into() + match update.kind { + SampleKind::Put => { + SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) + .encoding(update.data.value.encoding) + .timestamp(update.data.timestamp) + .into() + } + SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) + .timestamp(update.data.timestamp) + .into(), } - Some(Update { - kind: SampleKind::Delete, - data, - }) => SampleBuilder::delete(KeyExpr::from(k.clone())) - .timestamp(data.timestamp) - .into(), - None => SampleBuilder::from(sample.clone()) + } else { + SampleBuilder::from(sample.clone()) .keyexpr(k.clone()) - .into(), + .into() }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { @@ -520,12 +513,9 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. - } = entry.value; if let Err(e) = q - .reply(key.clone(), payload) - .encoding(encoding) + .reply(key.clone(), entry.value.payload) + .encoding(entry.value.encoding) .timestamp(entry.timestamp) .res() .await @@ -555,12 +545,9 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. 
- } = entry.value; if let Err(e) = q - .reply(q.key_expr().clone(), payload) - .encoding(encoding) + .reply(q.key_expr().clone(), entry.value.payload) + .encoding(entry.value.encoding) .timestamp(entry.timestamp) .res() .await diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 2075ea9472..8a7823ed72 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,7 +29,6 @@ use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh::Session; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4e9b46854d..d749a94ed9 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait}; +use zenoh::sample::builder::SampleBuilder; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 41e2b0fa04..64fa5b49c6 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -15,9 +15,6 @@ //! Publishing primitives. 
use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; @@ -105,29 +102,14 @@ impl QoSBuilderTrait for PublicationBuilder, T> { } } -impl TimestampBuilderTrait for PublicationBuilder { - fn timestamp>>(self, timestamp: TS) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for PublicationBuilder { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { - Self { - attachment: attachment.into(), - ..self - } +impl PublicationBuilder, T> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.publisher = self.publisher.allowed_destination(destination); + self } } @@ -163,14 +145,29 @@ impl

ValueBuilderTrait for PublicationBuilder { } } -impl PublicationBuilder, T> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self +impl SampleBuilderTrait for PublicationBuilder { + #[cfg(feature = "unstable")] + fn source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + #[cfg(feature = "unstable")] + fn attachment>>(self, attachment: TA) -> Self { + Self { + attachment: attachment.into(), + ..self + } + } +} + +impl TimestampBuilderTrait for PublicationBuilder { + fn timestamp>>(self, timestamp: TS) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 37c3a2303a..0696fcbe33 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,9 +18,7 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::{ - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -}; +use crate::sample::builder::SampleBuilder; use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; use crate::SessionRef; @@ -287,17 +285,17 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { + fn attachment>>(self, attachment: U) -> Self { Self { - source_info, + attachment: attachment.into(), ..self } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: U) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - attachment: attachment.into(), + 
source_info, ..self } } @@ -382,6 +380,63 @@ impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { } } +impl Query { + fn _reply_sample(&self, sample: Sample) -> ZResult<()> { + if !self._accepts_any_replies().unwrap_or(false) + && !self.key_expr().intersects(&sample.key_expr) + { + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) + } + #[cfg(not(feature = "unstable"))] + let ext_sinfo = None; + #[cfg(feature = "unstable")] + let ext_sinfo = sample.source_info.into(); + self.inner.primitives.send_response(Response { + rid: self.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(sample.key_expr.into()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Reply(zenoh::Reply { + consolidation: zenoh::Consolidation::DEFAULT, + ext_unknown: vec![], + payload: match sample.kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: sample.timestamp, + encoding: sample.encoding.into(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, + #[cfg(feature = "unstable")] + ext_attachment: sample.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, + ext_unknown: vec![], + payload: sample.payload.into(), + }), + SampleKind::Delete => ReplyBody::Del(Del { + timestamp: sample.timestamp, + ext_sinfo, + #[cfg(feature = "unstable")] + ext_attachment: sample.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, + ext_unknown: vec![], + }), + }, + }), + ext_qos: sample.qos.into(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.inner.zid, + eid: self.eid, + }), + }); + Ok(()) + } +} + impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { type Future = Ready; @@ -467,63 +522,6 @@ impl<'a> AsyncResolve for ReplyErrBuilder<'a> { } } -impl Query { - fn _reply_sample(&self, sample: 
Sample) -> ZResult<()> { - if !self._accepts_any_replies().unwrap_or(false) - && !self.key_expr().intersects(&sample.key_expr) - { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) - } - #[cfg(not(feature = "unstable"))] - let ext_sinfo = None; - #[cfg(feature = "unstable")] - let ext_sinfo = sample.source_info.into(); - self.inner.primitives.send_response(Response { - rid: self.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(sample.key_expr.into()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::DEFAULT, - ext_unknown: vec![], - payload: match sample.kind { - SampleKind::Put => ReplyBody::Put(Put { - timestamp: sample.timestamp, - encoding: sample.encoding.into(), - ext_sinfo, - #[cfg(feature = "shared-memory")] - ext_shm: None, - #[cfg(feature = "unstable")] - ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, - ext_unknown: vec![], - payload: sample.payload.into(), - }), - SampleKind::Delete => ReplyBody::Del(Del { - timestamp: sample.timestamp, - ext_sinfo, - #[cfg(feature = "unstable")] - ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, - ext_unknown: vec![], - }), - }, - }), - ext_qos: sample.qos.into(), - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.inner.zid, - eid: self.eid, - }), - }); - Ok(()) - } -} - pub(crate) struct QueryableState { pub(crate) id: Id, pub(crate) key_expr: WireExpr<'static>, diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 0518316be9..8c2d2e9937 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,7 +15,6 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use 
zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index a71a0a8034..f34704fb7e 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); From 23931f92d5f9c321d8a0247a1379cc76b0275def Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 15:48:02 +0200 Subject: [PATCH 095/598] more cleanup --- plugins/zenoh-plugin-rest/src/lib.rs | 1 - zenoh/src/query.rs | 1 - zenoh/src/sample/builder.rs | 2 ++ zenoh/src/sample/mod.rs | 8 +++++++- zenoh/src/session.rs | 1 + zenoh/tests/attachments.rs | 2 +- 6 files changed, 11 insertions(+), 4 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index f78c541eff..43c3f33776 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,7 +34,6 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample::builder::ValueBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 9f96db4f4b..cb1116130d 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -15,7 +15,6 @@ //! Query primitives. 
use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; -use crate::sample::builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::QoSBuilder; diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 295451abc1..5fab36617d 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -14,9 +14,11 @@ use std::marker::PhantomData; +#[cfg(feature = "unstable")] use crate::sample::Attachment; use crate::sample::QoS; use crate::sample::QoSBuilder; +#[cfg(feature = "unstable")] use crate::sample::SourceInfo; use crate::Encoding; use crate::KeyExpr; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index be80f8277e..6e457578a3 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -117,7 +117,12 @@ impl DataInfoIntoSample for Option { IntoPayload: Into, { if let Some(data_info) = self { - data_info.into_sample(key_expr, payload, attachment) + data_info.into_sample( + key_expr, + payload, + #[cfg(feature = "unstable")] + attachment, + ) } else { Sample { key_expr: key_expr.into(), @@ -172,6 +177,7 @@ impl SourceInfo { } } +#[zenoh_macros::unstable] impl From for Option { fn from(source_info: SourceInfo) -> Option { if source_info.is_empty() { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 63cc0bb7fa..c44cb4f817 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -809,6 +809,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: None, handler: DefaultHandler, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 2725351ab0..9fb99b7cc0 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait}; + use zenoh::prelude::sync::*; let 
zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh From 90923ca30bcc56b5eaf2e194643fe45c5395168e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 11:43:03 +0200 Subject: [PATCH 096/598] keyexpr.rs in api/ --- zenoh/src/{ => api}/key_expr.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/key_expr.rs (100%) diff --git a/zenoh/src/key_expr.rs b/zenoh/src/api/key_expr.rs similarity index 100% rename from zenoh/src/key_expr.rs rename to zenoh/src/api/key_expr.rs From dd2ee5c4add81fb68d892622e189b3914a9d3188 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 11:43:12 +0200 Subject: [PATCH 097/598] keyexpr.rs in api/ --- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 - .../src/replica/aligner.rs | 1 - zenoh/src/api.rs | 15 +++++++++++++++ zenoh/src/api/key_expr.rs | 4 ++-- zenoh/src/lib.rs | 10 +++++++++- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 2 +- zenoh/src/publication.rs | 12 ++++++------ zenoh/src/selector.rs | 2 +- zenoh/src/session.rs | 2 +- zenoh/src/subscriber.rs | 2 +- 12 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 zenoh/src/api.rs diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index d3ddbd43cc..5ab59ebe45 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -17,7 +17,7 @@ use schemars::JsonSchema; use serde_json::{Map, Value}; use std::convert::TryFrom; use std::time::Duration; -use zenoh::{key_expr::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; +use zenoh::{prelude::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..85d730bb41 
100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,6 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#"

diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 64d5cfa1cd..ca93651e46 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,7 +18,6 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; -use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::builder::SampleBuilder; diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs new file mode 100644 index 0000000000..94893aca68 --- /dev/null +++ b/zenoh/src/api.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub(crate) mod key_expr; \ No newline at end of file diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index d2bfb5bcfe..4cbe6409f2 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -323,8 +323,8 @@ impl FromStr for KeyExpr<'static> { Ok(Self(KeyExprInner::Owned(s.parse()?))) } } -impl<'a> From> for OwnedKeyExpr { - fn from(val: super::KeyExpr<'a>) -> Self { +impl<'a> From> for OwnedKeyExpr { + fn from(val: KeyExpr<'a>) -> Self { match val.0 { KeyExprInner::Borrowed(key_expr) | KeyExprInner::BorrowedWire { key_expr, .. 
} => { key_expr.into() diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ea212485ec..e62db34c59 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -123,12 +123,20 @@ pub const FEATURES: &str = concat_enabled_features!( ] ); +pub mod key_expr { + pub use crate::api::key_expr::OwnedKeyExpr; + pub mod format { + pub use crate::api::key_expr::format::KeFormat; + } +} + + mod admin; #[macro_use] mod session; pub use session::*; -pub mod key_expr; +mod api; pub(crate) mod net; pub use net::runtime; pub mod selector; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 5ef6b7cdfe..148d9eddab 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -13,7 +13,7 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::encoding::Encoding; -use crate::key_expr::KeyExpr; +use crate::api::key_expr::KeyExpr; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 850148f506..d059a82afc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -24,7 +24,7 @@ pub use common::*; pub(crate) mod common { - pub use crate::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; + pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; pub use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 64fa5b49c6..1f6ad17333 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -878,9 +878,9 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { .try_into() .expect("How did you get a key expression with a length over 2^32!?"); key_expr = match key_expr.0 { - crate::key_expr::KeyExprInner::Borrowed(key_expr) - | crate::key_expr::KeyExprInner::BorrowedWire { key_expr, .. 
} => { - KeyExpr(crate::key_expr::KeyExprInner::BorrowedWire { + crate::api::key_expr::KeyExprInner::Borrowed(key_expr) + | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => { + KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, expr_id, mapping: Mapping::Sender, @@ -888,9 +888,9 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { session_id, }) } - crate::key_expr::KeyExprInner::Owned(key_expr) - | crate::key_expr::KeyExprInner::Wire { key_expr, .. } => { - KeyExpr(crate::key_expr::KeyExprInner::Wire { + crate::api::key_expr::KeyExprInner::Owned(key_expr) + | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. } => { + KeyExpr(crate::api::key_expr::KeyExprInner::Wire { key_expr, expr_id, mapping: Mapping::Sender, diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..df562e196b 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -18,7 +18,7 @@ use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_result::ZResult; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; -use crate::{prelude::KeyExpr, queryable::Query}; +use crate::{api::key_expr::KeyExpr, queryable::Query}; use std::{ borrow::{Borrow, Cow}, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index c44cb4f817..9af5ee1d5c 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -17,7 +17,7 @@ use crate::config::Notifier; use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; -use crate::key_expr::KeyExprInner; +use crate::api::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..1fc6e82b46 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -14,7 +14,7 @@ //! Subscribing primitives. 
use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::key_expr::KeyExpr; +use crate::api::key_expr::KeyExpr; use crate::prelude::Locality; use crate::sample::Sample; use crate::Id; From 948f8e32dfa3583914d5576b43cd2d257ada88dc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 12:10:20 +0200 Subject: [PATCH 098/598] keyexpr build fix --- examples/examples/z_formats.rs | 8 +++++--- zenoh/src/lib.rs | 9 ++++++++- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 357448143e..69313f0e56 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,11 @@ // ZettaScale Zenoh Team, // -use zenoh::prelude::keyexpr; +use zenoh::key_expr::keyexpr; +use zenoh::key_expr::kedefine; +use zenoh::key_expr::keformat; -zenoh::kedefine!( +kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" ); @@ -23,7 +25,7 @@ fn main() { // Formatting let mut formatter = file_format::formatter(); let file = "hi/there"; - let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); + let ke = keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e62db34c59..c0bf501cc9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -124,9 +124,16 @@ pub const FEATURES: &str = concat_enabled_features!( ); pub mod key_expr { + pub use crate::api::key_expr::keyexpr; pub use crate::api::key_expr::OwnedKeyExpr; + pub use crate::api::key_expr::kedefine; + pub use crate::api::key_expr::keformat; + // keyexpr format macro support pub mod format { - pub use crate::api::key_expr::format::KeFormat; + pub use crate::api::key_expr::format::*; + pub mod macro_support { + pub use 
crate::api::key_expr::format::macro_support::*; + } } } From 5bd3e99fb0a5955c6b83679fe3ee4de2a40e168c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 14:24:57 +0200 Subject: [PATCH 099/598] session move unfinished --- zenoh/src/{ => api}/session.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/session.rs (100%) diff --git a/zenoh/src/session.rs b/zenoh/src/api/session.rs similarity index 100% rename from zenoh/src/session.rs rename to zenoh/src/api/session.rs From 2378d5c3f9eb16a6bd6a815bc1478f705f848484 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 14:25:06 +0200 Subject: [PATCH 100/598] session move unfinished --- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 4 +- .../src/backends_mgt.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 4 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/mod.rs | 2 +- .../src/replica/storage.rs | 378 ++++++++++-------- .../src/storages_mgt.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh/src/admin.rs | 4 +- zenoh/src/api.rs | 3 +- zenoh/src/api/key_expr.rs | 2 +- zenoh/src/api/session.rs | 156 ++++++++ zenoh/src/handlers.rs | 3 +- zenoh/src/info.rs | 2 +- zenoh/src/lib.rs | 171 +------- zenoh/src/liveliness.rs | 2 +- zenoh/src/publication.rs | 8 +- zenoh/src/queryable.rs | 4 +- zenoh/src/subscriber.rs | 6 +- zenoh/tests/qos.rs | 2 +- 25 files changed, 416 insertions(+), 353 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index ad254278e3..5615ce68af 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -144,7 +144,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { env_logger::init(); // create a zenoh Session that shares the 
same Runtime than zenohd - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); // the HasMap used as a storage by this example of storage plugin let mut stored: HashMap = HashMap::new(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..49c58f5074 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::TIME_RANGE_KEY; -use zenoh::Session; +use zenoh::session::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; @@ -490,7 +490,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { let _ = env_logger::try_init(); let zid = runtime.zid().to_string(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); let mut app = Server::with_state((Arc::new(session), zid)); app.with( diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 90a6ae6250..dcce49f5da 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,7 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; use zenoh_result::ZResult; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 91df2f108d..78a9814179 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,7 +30,7 @@ 
use storages_mgt::StorageMessage; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; use zenoh_backend_traits::config::StorageConfig; @@ -114,7 +114,7 @@ impl StorageRuntimeInner { let plugins_manager = PluginsManager::dynamic(lib_loader.clone(), BACKEND_LIB_PREFIX) .declare_static_plugin::(); - let session = Arc::new(zenoh::init(runtime.clone()).res_sync()?); + let session = Arc::new(zenoh::session::init(runtime.clone()).res_sync()?); // After this moment result should be only Ok. Failure of loading of one voulme or storage should not affect others. diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 1ce6a1cb16..8654927f9f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,7 +21,7 @@ use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; -use zenoh::Session; +use zenoh::session::Session; pub struct AlignQueryable { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index ca93651e46..9c54bcf461 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -22,7 +22,7 @@ use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; -use zenoh::Session; +use zenoh::session::Session; pub struct Aligner { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 5dda032029..9a4fd35a11 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -29,7 +29,7 @@ use urlencoding::encode; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; pub mod align_queryable; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 06c5882408..69ecf9477c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,19 +19,15 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; +use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; -use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::SampleBuilder; -use zenoh::sample::{Sample, SampleKind}; +use zenoh::query::ConsolidationMode; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::value::Value; -use zenoh::{Result as ZResult, Session}; +use zenoh::{Result as ZResult, session::Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -42,15 +38,148 @@ use 
zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; +#[derive(Clone, Debug)] +pub enum StorageSampleKind { + Put(Value), + Delete, +} + +#[derive(Clone, Debug)] +pub struct StorageSample { + pub key_expr: KeyExpr<'static>, + pub timestamp: Timestamp, + pub kind: StorageSampleKind, +} + +impl From for StorageSample { + fn from(sample: Sample) -> Self { + let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); + // TODO: add API for disassembly of Sample + let key_expr = sample.key_expr().clone(); + let payload = sample.payload().clone(); + let encoding = sample.encoding().clone(); + let kind = match sample.kind() { + SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), + SampleKind::Delete => StorageSampleKind::Delete, + }; + StorageSample { + key_expr, + timestamp, + kind, + } + } +} + #[derive(Clone)] -struct Update { - kind: SampleKind, - data: StoredData, +enum Update { + Put(StoredData), + Delete(Timestamp), +} + +impl From for Update { + fn from(value: StorageSample) -> Self { + match value.kind { + StorageSampleKind::Put(data) => Update::Put(StoredData { + value: data, + timestamp: value.timestamp, + }), + StorageSampleKind::Delete => Update::Delete(value.timestamp), + } + } +} + +impl Update { + fn timestamp(&self) -> &Timestamp { + match self { + Update::Put(data) => &data.timestamp, + Update::Delete(ts) => ts, + } + } +} + +// implement from String for Update +impl TryFrom for Update { + type Error = zenoh::Error; + + fn try_from(value: String) -> Result { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).with_encoding(result.2); + let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing 
timestamp")?; + if result.0.eq(&(SampleKind::Put).to_string()) { + Ok(Update::Put(StoredData { value, timestamp })) + } else { + Ok(Update::Delete(timestamp)) + } + } +} + +// implement to_string for Update +impl ToString for Update { + fn to_string(&self) -> String { + let result = match self { + Update::Put(data) => ( + SampleKind::Put.to_string(), + data.timestamp.to_string(), + data.value.encoding.to_string(), + data.value.payload.slices().collect::>(), + ), + Update::Delete(ts) => ( + SampleKind::Delete.to_string(), + ts.to_string(), + "".to_string(), + vec![], + ), + }; + serde_json::to_string_pretty(&result).unwrap() + } +} + +trait IntoStorageSample { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample + where + IntoKeyExpr: Into>; +} + +impl IntoStorageSample for StoredData { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample + where + IntoKeyExpr: Into>, + { + StorageSample { + key_expr: key_expr.into(), + timestamp: self.timestamp, + kind: StorageSampleKind::Put(self.value), + } + } +} + +impl IntoStorageSample for Update { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample + where + IntoKeyExpr: Into>, + { + match self { + Update::Put(data) => StorageSample { + key_expr: key_expr.into(), + timestamp: data.timestamp, + kind: StorageSampleKind::Put(data.value), + }, + Update::Delete(ts) => StorageSample { + key_expr: key_expr.into(), + timestamp: ts, + kind: StorageSampleKind::Delete, + }, + } + } } pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -109,10 +238,11 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); + serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap let mut wildcard_updates = storage_service.wildcard_updates.write().await; for 
(k, data) in saved_wc { - wildcard_updates.insert(&k, construct_update(data)); + wildcard_updates.insert(&k, Update::try_from(data).unwrap()); + // TODO: Remove unwrap } } } @@ -183,7 +313,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } }, // on query on key_expr @@ -223,16 +353,15 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let sample = match sample { + let mut sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); - self.process_sample(sample).await; + sample.ensure_timestamp(); + self.process_sample(sample.into()).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -262,61 +391,48 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: Sample) { + async fn process_sample(&self, sample: StorageSample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); + // if wildcard, update wildcard_updates - if sample.key_expr().is_wild() { + if sample.key_expr.is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr().is_wild() { - self.get_matching_keys(sample.key_expr()).await + let matching_keys = if sample.key_expr.is_wild() { + self.get_matching_keys(&sample.key_expr).await } else { - vec![sample.key_expr().clone().into()] + vec![sample.key_expr.clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr(), + sample.key_expr, 
matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) + .is_deleted(&k.clone(), &sample.timestamp) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, &sample.timestamp).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - k + &k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = if let Some(update) = self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) - .await - { - match update.kind { - SampleKind::Put => { - SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) - .encoding(update.data.value.encoding) - .timestamp(update.data.timestamp) - .into() - } - SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) - .timestamp(update.data.timestamp) - .into(), - } - } else { - SampleBuilder::from(sample.clone()) - .keyexpr(k.clone()) - .into() - }; + let sample_to_store = + match self.ovderriding_wild_update(&k, &sample.timestamp).await { + Some(overriding_update) => overriding_update.into_sample(k.clone()), - let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { + None => sample.clone().into(), + }; + + let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -324,25 +440,21 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample.kind() { - SampleKind::Put => { + let result = match sample_to_store.kind { + StorageSampleKind::Put(data) => { storage .put( stripped_key, - Value::new(sample_to_store.payload().clone()) - .encoding(sample_to_store.encoding().clone()), - 
*sample_to_store.timestamp().unwrap(), + data, + sample_to_store.timestamp, ) .await - } - SampleKind::Delete => { + }, + StorageSampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; - storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) - .await - } + self.mark_tombstone(&k, sample_to_store.timestamp).await; + storage.delete(stripped_key, sample_to_store.timestamp).await + }, }; drop(storage); if self.replication.is_some() @@ -354,7 +466,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), sample_to_store.timestamp)); match sending { Ok(_) => (), Err(e) => { @@ -385,26 +497,16 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: Sample) { + async fn register_wildcard_update(&self, sample: StorageSample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr().clone(); + let key = sample.key_expr.clone(); let mut wildcards = self.wildcard_updates.write().await; - let timestamp = *sample.timestamp().unwrap(); - wildcards.insert( - &key, - Update { - kind: sample.kind(), - data: StoredData { - value: Value::from(sample), - timestamp, - }, - }, - ); + wildcards.insert(&key, sample.into()); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, serialize_update(update)); + serialized_data.insert(k, update.to_string()); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -433,34 +535,36 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if weight.is_some() && weight.unwrap().data.timestamp > *ts { - // if the key matches a wild card update, check 
whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; + if let Some(weight) = weight { + if weight.timestamp() > ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; + } } } - } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = &weight.unwrap().data.timestamp; - update = Some(weight.unwrap().clone()); + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = weight.timestamp(); + update = Some(weight.clone()); + } } } } @@ -513,13 +617,8 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - if let Err(e) = q - .reply(key.clone(), entry.value.payload) - .encoding(entry.value.encoding) - .timestamp(entry.timestamp) - .res() - .await - { + let sample = entry.into_sample(key.clone()); + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -545,13 +644,13 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for 
entry in stored_data { - if let Err(e) = q - .reply(q.key_expr().clone(), entry.value.payload) - .encoding(entry.value.encoding) - .timestamp(entry.timestamp) - .res() - .await - { + let Value { + payload, encoding, .. + } = entry.value; + let sample = Sample::put(q.key_expr().clone(), payload) + .with_encoding(encoding) + .with_timestamp(entry.timestamp); + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -658,7 +757,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", @@ -671,47 +770,6 @@ impl StorageService { } } -fn serialize_update(update: &Update) -> String { - let Update { - kind, - data: - StoredData { - value: Value { - payload, encoding, .. - }, - timestamp, - }, - } = update; - let zbuf: ZBuf = payload.into(); - - let result = ( - kind.to_string(), - timestamp.to_string(), - encoding.to_string(), - zbuf.slices().collect::>(), - ); - serde_json::to_string_pretty(&result).unwrap() -} - -fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).encoding(result.2); - let data = StoredData { - value, - timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() - }; - let kind = if result.0.eq(&(SampleKind::Put).to_string()) { - SampleKind::Put - } else { - SampleKind::Delete - }; - Update { kind, data } -} - // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -743,7 +801,7 @@ impl Timed for GarbageCollectionEvent { 
let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.data.timestamp; + let ts = update.timestamp(); if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 6de5e2f2ca..8643429a65 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_result::ZResult; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 8a7823ed72..f74d9d547a 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -31,7 +31,7 @@ use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; use zenoh::Error as ZError; use zenoh::Result as ZResult; -use zenoh::Session; +use zenoh::session::Session; use zenoh_result::bail; use zenoh_sync::Condition; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 03f0814e5c..fdba3af231 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -18,7 +18,7 @@ use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::subscriber::FlumeSubscriber; -use zenoh::SessionRef; +use zenoh::session::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_result::{bail, ZResult}; use zenoh_util::core::ResolveFuture; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d749a94ed9..4a9469c835 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -24,7 +24,7 @@ use zenoh::sample::builder::SampleBuilder; use zenoh::subscriber::{Reliability, 
Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; -use zenoh::SessionRef; +use zenoh::session::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 73fbd7dfc4..2c9826c98b 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -15,7 +15,7 @@ use super::PublicationCacheBuilder; use std::convert::TryInto; use std::sync::Arc; use zenoh::prelude::KeyExpr; -use zenoh::{Session, SessionRef}; +use zenoh::session::{Session, SessionRef}; /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 3c76ca468a..260617cda2 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -17,14 +17,14 @@ use crate::{ prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Session, ZResult, + Payload, Session }; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, sync::Arc, }; -use zenoh_core::SyncResolve; +use zenoh_core::{Result as ZResult, SyncResolve}; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 94893aca68..1e7cec5380 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,4 +12,5 @@ // ZettaScale Zenoh Team, // -pub(crate) mod key_expr; \ No newline at end of file +pub(crate) mod key_expr; +pub(crate) mod session; \ No newline at end of file diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 4cbe6409f2..47d3a71c56 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -57,7 +57,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, 
Undeclarable}; +use crate::{net::primitives::Primitives, prelude::Selector, Session, api::session::Undeclarable}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 9af5ee1d5c..89cd249bdb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -44,10 +44,12 @@ use crate::Selector; use crate::SourceInfo; use crate::Value; use log::{error, trace, warn}; +use zenoh_core::Resolvable; use std::collections::HashMap; use std::convert::TryFrom; use std::convert::TryInto; use std::fmt; +use std::future::Ready; use std::ops::Deref; use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::Arc; @@ -2595,3 +2597,157 @@ impl crate::net::primitives::EPrimitives for Session { self } } + +/// Open a zenoh [`Session`]. +/// +/// # Arguments +/// +/// * `config` - The [`Config`] for the zenoh session +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # } +/// ``` +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use std::str::FromStr; +/// use zenoh::prelude::r#async::*; +/// +/// let mut config = config::peer(); +/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); +/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); +/// +/// let session = zenoh::open(config).res().await.unwrap(); +/// # } +/// ``` +pub fn open(config: TryIntoConfig) -> OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + OpenBuilder { config } +} + +/// A builder returned by [`open`] used to open a zenoh [`Session`]. 
+/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +pub struct OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + config: TryIntoConfig, +} + +impl Resolvable for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + type To = ZResult; +} + +impl SyncResolve for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + fn res_sync(self) -> ::To { + let config: crate::config::Config = self + .config + .try_into() + .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; + Session::new(config).res_sync() + } +} + +impl AsyncResolve for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +/// Initialize a Session with an existing Runtime. +/// This operation is used by the plugins to share the same Runtime as the router. +#[doc(hidden)] +#[zenoh_macros::unstable] +pub fn init(runtime: Runtime) -> InitBuilder { + InitBuilder { + runtime, + aggregated_subscribers: vec![], + aggregated_publishers: vec![], + } +} + +/// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. 
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[doc(hidden)] +#[zenoh_macros::unstable] +pub struct InitBuilder { + runtime: Runtime, + aggregated_subscribers: Vec, + aggregated_publishers: Vec, +} + +#[zenoh_macros::unstable] +impl InitBuilder { + #[inline] + pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { + self.aggregated_subscribers = exprs; + self + } + + #[inline] + pub fn aggregated_publishers(mut self, exprs: Vec) -> Self { + self.aggregated_publishers = exprs; + self + } +} + +#[zenoh_macros::unstable] +impl Resolvable for InitBuilder { + type To = ZResult; +} + +#[zenoh_macros::unstable] +impl SyncResolve for InitBuilder { + fn res_sync(self) -> ::To { + Ok(Session::init( + self.runtime, + self.aggregated_subscribers, + self.aggregated_publishers, + ) + .res_sync()) + } +} + +#[zenoh_macros::unstable] +impl AsyncResolve for InitBuilder { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index c5d2c6bb90..6aecda34b9 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -13,8 +13,7 @@ // //! Callback handler trait. -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; - +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; use std::sync::{Arc, Mutex, Weak}; use zenoh_collections::RingBuffer as RingBufferInner; use zenoh_result::ZResult; diff --git a/zenoh/src/info.rs b/zenoh/src/info.rs index 3e0efdf134..1f7a903ba4 100644 --- a/zenoh/src/info.rs +++ b/zenoh/src/info.rs @@ -13,7 +13,7 @@ // //! Tools to access information about the current zenoh [`Session`](crate::Session). 
-use crate::SessionRef; +use crate::api::session::SessionRef; use std::future::Ready; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c0bf501cc9..2f1beb5413 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -84,14 +84,10 @@ pub(crate) type Id = u32; use git_version::git_version; use handlers::DefaultHandler; #[cfg(feature = "unstable")] -use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; -use std::future::Ready; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; pub use zenoh_macros::{ke, kedefine, keformat, kewrite}; use zenoh_protocol::core::WhatAmIMatcher; -use zenoh_result::{zerror, ZResult}; use zenoh_util::concat_enabled_features; /// A zenoh error. @@ -123,6 +119,8 @@ pub const FEATURES: &str = concat_enabled_features!( ] ); +pub use crate::api::session::open; + pub mod key_expr { pub use crate::api::key_expr::keyexpr; pub use crate::api::key_expr::OwnedKeyExpr; @@ -137,11 +135,16 @@ pub mod key_expr { } } +pub mod session { + pub use crate::api::session::open; + pub use crate::api::session::init; + pub use crate::api::session::Session; + pub use crate::api::session::SessionRef; + pub use crate::api::session::SessionDeclarations; +} mod admin; #[macro_use] -mod session; -pub use session::*; mod api; pub(crate) mod net; @@ -231,158 +234,4 @@ where config: config.try_into().map_err(|e| e.into()), handler: DefaultHandler, } -} - -/// Open a zenoh [`Session`]. 
-/// -/// # Arguments -/// -/// * `config` - The [`Config`] for the zenoh session -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; -/// -/// let mut config = config::peer(); -/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); -/// -/// let session = zenoh::open(config).res().await.unwrap(); -/// # } -/// ``` -pub fn open(config: TryIntoConfig) -> OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - OpenBuilder { config } -} - -/// A builder returned by [`open`] used to open a zenoh [`Session`]. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - config: TryIntoConfig, -} - -impl Resolvable for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type To = ZResult; -} - -impl SyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - fn res_sync(self) -> ::To { - let config: crate::config::Config = self - .config - .try_into() - .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() - } -} - -impl AsyncResolve for 
OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -/// Initialize a Session with an existing Runtime. -/// This operation is used by the plugins to share the same Runtime as the router. -#[doc(hidden)] -#[zenoh_macros::unstable] -pub fn init(runtime: Runtime) -> InitBuilder { - InitBuilder { - runtime, - aggregated_subscribers: vec![], - aggregated_publishers: vec![], - } -} - -/// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[doc(hidden)] -#[zenoh_macros::unstable] -pub struct InitBuilder { - runtime: Runtime, - aggregated_subscribers: Vec, - aggregated_publishers: Vec, -} - -#[zenoh_macros::unstable] -impl InitBuilder { - #[inline] - pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { - self.aggregated_subscribers = exprs; - self - } - - #[inline] - pub fn aggregated_publishers(mut self, exprs: Vec) -> Self { - self.aggregated_publishers = exprs; - self - } -} - -#[zenoh_macros::unstable] -impl Resolvable for InitBuilder { - type To = ZResult; -} - -#[zenoh_macros::unstable] -impl SyncResolve for InitBuilder { - fn res_sync(self) -> ::To { - Ok(Session::init( - self.runtime, - self.aggregated_subscribers, - self.aggregated_publishers, - ) - .res_sync()) - } -} - -#[zenoh_macros::unstable] -impl AsyncResolve for InitBuilder { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} +} \ No newline at end of file diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 23e1846741..8ce5386c3f 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -26,7 +26,7 @@ use { handlers::DefaultHandler, prelude::*, subscriber::{Subscriber, 
SubscriberInner}, - SessionRef, Undeclarable, + api::session::SessionRef, api::session::Undeclarable, }, std::convert::TryInto, std::future::Ready, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1f6ad17333..f634a14dd1 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -18,8 +18,8 @@ use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; -use crate::SessionRef; -use crate::Undeclarable; +use crate::api::session::SessionRef; +use crate::api::session::Undeclarable; #[cfg(feature = "unstable")] use crate::{ handlers::{Callback, DefaultHandler, IntoHandler}, @@ -1511,7 +1511,7 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() { - use crate::{open, prelude::sync::*}; + use crate::{api::session::open, prelude::sync::*}; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; @@ -1539,7 +1539,7 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { - use crate::{open, prelude::sync::*}; + use crate::{api::session::open, prelude::sync::*}; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0696fcbe33..447dfc81b6 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -21,8 +21,8 @@ use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; -use crate::SessionRef; -use crate::Undeclarable; +use crate::api::session::SessionRef; +use crate::api::session::Undeclarable; #[cfg(feature = "unstable")] use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 1fc6e82b46..64f8d5e026 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,13 +13,13 @@ // //! Subscribing primitives. 
-use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::session::Undeclarable; +use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::prelude::Locality; use crate::sample::Sample; use crate::Id; -use crate::Undeclarable; -use crate::{Result as ZResult, SessionRef}; +use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; use std::ops::{Deref, DerefMut}; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 5fd3edd985..f64784399c 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -14,7 +14,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh::{publication::Priority, SessionDeclarations}; +use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); From 2656d9f900ab5a0c231ae684a39f8d1627c8b81b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 20:06:01 +0200 Subject: [PATCH 101/598] restored incorrectly committed file --- .../src/replica/storage.rs | 378 ++++++++---------- 1 file changed, 160 insertions(+), 218 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 69ecf9477c..06c5882408 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,15 +19,19 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::FromStr; +use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; +use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; -use zenoh::query::ConsolidationMode; +use zenoh::query::{ConsolidationMode, QueryTarget}; +use 
zenoh::sample::builder::SampleBuilder; +use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::{Result as ZResult, session::Session}; +use zenoh::value::Value; +use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -38,148 +42,15 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; -#[derive(Clone, Debug)] -pub enum StorageSampleKind { - Put(Value), - Delete, -} - -#[derive(Clone, Debug)] -pub struct StorageSample { - pub key_expr: KeyExpr<'static>, - pub timestamp: Timestamp, - pub kind: StorageSampleKind, -} - -impl From for StorageSample { - fn from(sample: Sample) -> Self { - let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); - // TODO: add API for disassembly of Sample - let key_expr = sample.key_expr().clone(); - let payload = sample.payload().clone(); - let encoding = sample.encoding().clone(); - let kind = match sample.kind() { - SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), - SampleKind::Delete => StorageSampleKind::Delete, - }; - StorageSample { - key_expr, - timestamp, - kind, - } - } -} - #[derive(Clone)] -enum Update { - Put(StoredData), - Delete(Timestamp), -} - -impl From for Update { - fn from(value: StorageSample) -> Self { - match value.kind { - StorageSampleKind::Put(data) => Update::Put(StoredData { - value: data, - timestamp: value.timestamp, - 
}), - StorageSampleKind::Delete => Update::Delete(value.timestamp), - } - } -} - -impl Update { - fn timestamp(&self) -> &Timestamp { - match self { - Update::Put(data) => &data.timestamp, - Update::Delete(ts) => ts, - } - } -} - -// implement from String for Update -impl TryFrom for Update { - type Error = zenoh::Error; - - fn try_from(value: String) -> Result { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).with_encoding(result.2); - let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; - if result.0.eq(&(SampleKind::Put).to_string()) { - Ok(Update::Put(StoredData { value, timestamp })) - } else { - Ok(Update::Delete(timestamp)) - } - } -} - -// implement to_string for Update -impl ToString for Update { - fn to_string(&self) -> String { - let result = match self { - Update::Put(data) => ( - SampleKind::Put.to_string(), - data.timestamp.to_string(), - data.value.encoding.to_string(), - data.value.payload.slices().collect::>(), - ), - Update::Delete(ts) => ( - SampleKind::Delete.to_string(), - ts.to_string(), - "".to_string(), - vec![], - ), - }; - serde_json::to_string_pretty(&result).unwrap() - } -} - -trait IntoStorageSample { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>; -} - -impl IntoStorageSample for StoredData { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - StorageSample { - key_expr: key_expr.into(), - timestamp: self.timestamp, - kind: StorageSampleKind::Put(self.value), - } - } -} - -impl IntoStorageSample for Update { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - match self { - Update::Put(data) => StorageSample { - key_expr: key_expr.into(), - timestamp: data.timestamp, - kind: 
StorageSampleKind::Put(data.value), - }, - Update::Delete(ts) => StorageSample { - key_expr: key_expr.into(), - timestamp: ts, - kind: StorageSampleKind::Delete, - }, - } - } +struct Update { + kind: SampleKind, + data: StoredData, } pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -238,11 +109,10 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap + serde_json::from_str(&saved_wc).unwrap(); let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, Update::try_from(data).unwrap()); - // TODO: Remove unwrap + wildcard_updates.insert(&k, construct_update(data)); } } } @@ -313,7 +183,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. 
Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } }, // on query on key_expr @@ -353,15 +223,16 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let mut sample = match sample { + let sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - sample.ensure_timestamp(); - self.process_sample(sample.into()).await; + let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); + self.process_sample(sample).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -391,48 +262,61 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: StorageSample) { + async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), &sample.timestamp) + .is_deleted(&k.clone(), sample.timestamp().unwrap()) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && 
self.is_latest(&k, &sample.timestamp).await)) + && self.is_latest(&k, sample.timestamp().unwrap()).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - &k + k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = - match self.ovderriding_wild_update(&k, &sample.timestamp).await { - Some(overriding_update) => overriding_update.into_sample(k.clone()), - - None => sample.clone().into(), - }; + let sample_to_store: Sample = if let Some(update) = self + .ovderriding_wild_update(&k, sample.timestamp().unwrap()) + .await + { + match update.kind { + SampleKind::Put => { + SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) + .encoding(update.data.value.encoding) + .timestamp(update.data.timestamp) + .into() + } + SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) + .timestamp(update.data.timestamp) + .into(), + } + } else { + SampleBuilder::from(sample.clone()) + .keyexpr(k.clone()) + .into() + }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -440,21 +324,25 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample_to_store.kind { - StorageSampleKind::Put(data) => { + let result = match sample.kind() { + SampleKind::Put => { storage .put( stripped_key, - data, - sample_to_store.timestamp, + Value::new(sample_to_store.payload().clone()) + .encoding(sample_to_store.encoding().clone()), + *sample_to_store.timestamp().unwrap(), ) .await - }, - StorageSampleKind::Delete => { + } + SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp).await; - 
storage.delete(stripped_key, sample_to_store.timestamp).await - }, + self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) + .await; + storage + .delete(stripped_key, *sample_to_store.timestamp().unwrap()) + .await + } }; drop(storage); if self.replication.is_some() @@ -466,7 +354,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), sample_to_store.timestamp)); + .send((k.clone(), *sample_to_store.timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -497,16 +385,26 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: StorageSample) { + async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr.clone(); + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - wildcards.insert(&key, sample.into()); + let timestamp = *sample.timestamp().unwrap(); + wildcards.insert( + &key, + Update { + kind: sample.kind(), + data: StoredData { + value: Value::from(sample), + timestamp, + }, + }, + ); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, update.to_string()); + serialized_data.insert(k, serialize_update(update)); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -535,36 +433,34 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if let Some(weight) = weight { - if weight.timestamp() > ts { - // if the key matches a wild card update, check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; 
- let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; - } + if weight.is_some() && weight.unwrap().data.timestamp > *ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; } } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = weight.timestamp(); - update = Some(weight.clone()); - } + } + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = &weight.unwrap().data.timestamp; + update = Some(weight.unwrap().clone()); } } } @@ -617,8 +513,13 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = entry.into_sample(key.clone()); - if let Err(e) = q.reply_sample(sample).res().await { + if let Err(e) = q + .reply(key.clone(), entry.value.payload) + .encoding(entry.value.encoding) + .timestamp(entry.timestamp) + .res() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -644,13 +545,13 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. 
- } = entry.value; - let sample = Sample::put(q.key_expr().clone(), payload) - .with_encoding(encoding) - .with_timestamp(entry.timestamp); - if let Err(e) = q.reply_sample(sample).res().await { + if let Err(e) = q + .reply(q.key_expr().clone(), entry.value.payload) + .encoding(entry.value.encoding) + .timestamp(entry.timestamp) + .res() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -757,7 +658,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", @@ -770,6 +671,47 @@ impl StorageService { } } +fn serialize_update(update: &Update) -> String { + let Update { + kind, + data: + StoredData { + value: Value { + payload, encoding, .. + }, + timestamp, + }, + } = update; + let zbuf: ZBuf = payload.into(); + + let result = ( + kind.to_string(), + timestamp.to_string(), + encoding.to_string(), + zbuf.slices().collect::>(), + ); + serde_json::to_string_pretty(&result).unwrap() +} + +fn construct_update(data: String) -> Update { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).encoding(result.2); + let data = StoredData { + value, + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() + }; + let kind = if result.0.eq(&(SampleKind::Put).to_string()) { + SampleKind::Put + } else { + SampleKind::Delete + }; + Update { kind, data } +} + // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -801,7 +743,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in 
wildcard_updates.key_value_pairs() { - let ts = update.timestamp(); + let ts = update.data.timestamp; if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); From acbf5517fed172d00819ad374a3f8aae7fa98ab3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 20:17:15 +0200 Subject: [PATCH 102/598] session move build fix --- .../zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 9 +++++---- plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 9 +++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 06c5882408..39e6d34f6b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -29,7 +29,7 @@ use zenoh::sample::builder::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; -use zenoh::{Result as ZResult, Session}; +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 36162f01c2..6b64bbd742 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -23,23 +23,24 @@ use async_std::task; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::session::Session; use zenoh::{prelude::Config, time::Timestamp}; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; -async 
fn put_data(session: &zenoh::Session, key_expr: &str, value: &str, _timestamp: Timestamp) { +async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... session.put(key_expr, value).res().await.unwrap(); } -async fn delete_data(session: &zenoh::Session, key_expr: &str, _timestamp: Timestamp) { +async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... session.delete(key_expr).res().await.unwrap(); } -async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { +async fn get_data(session: &Session, key_expr: &str) -> Vec { let replies: Vec = session .get(key_expr) .res() @@ -83,7 +84,7 @@ async fn test_updates_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); sleep(std::time::Duration::from_secs(1)); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 5a71dc23f0..864ec5b79e 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -24,23 +24,24 @@ use async_std::task; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::session::Session; use zenoh::{prelude::Config, time::Timestamp}; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; -async fn put_data(session: &zenoh::Session, key_expr: &str, value: &str, _timestamp: Timestamp) { +async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data 
('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... session.put(key_expr, value).res().await.unwrap(); } -async fn delete_data(session: &zenoh::Session, key_expr: &str, _timestamp: Timestamp) { +async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... session.delete(key_expr).res().await.unwrap(); } -async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { +async fn get_data(session: &Session, key_expr: &str) -> Vec { let replies: Vec = session .get(key_expr) .res() @@ -84,7 +85,7 @@ async fn test_wild_card_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); sleep(std::time::Duration::from_secs(1)); // put *, ts: 1 From 996accf66348632333a4abb40df34f5100324e9a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:48:05 +0200 Subject: [PATCH 103/598] sample and value into api --- zenoh/src/{sample/builder.rs => api/builders/sample.rs} | 0 zenoh/src/{sample/mod.rs => api/sample.rs} | 0 zenoh/src/{ => api}/value.rs | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{sample/builder.rs => api/builders/sample.rs} (100%) rename zenoh/src/{sample/mod.rs => api/sample.rs} (100%) rename zenoh/src/{ => api}/value.rs (100%) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/api/builders/sample.rs similarity index 100% rename from zenoh/src/sample/builder.rs rename to zenoh/src/api/builders/sample.rs diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/api/sample.rs similarity index 100% rename from zenoh/src/sample/mod.rs rename to zenoh/src/api/sample.rs diff --git a/zenoh/src/value.rs b/zenoh/src/api/value.rs similarity index 100% rename 
from zenoh/src/value.rs rename to zenoh/src/api/value.rs From 554a65c2bcd8947899c37b2040915a461c77482f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:48:14 +0200 Subject: [PATCH 104/598] sample and value into api --- .../src/replica/aligner.rs | 4 +-- .../src/replica/storage.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 4 +-- zenoh/src/admin.rs | 4 +-- zenoh/src/api.rs | 5 ++- zenoh/src/api/builders.rs | 15 +++++++++ zenoh/src/api/builders/sample.rs | 10 +++--- zenoh/src/api/sample.rs | 14 ++++----- zenoh/src/api/session.rs | 16 +++++----- zenoh/src/api/value.rs | 3 +- zenoh/src/lib.rs | 31 ++++++++++++++----- zenoh/src/net/runtime/adminspace.rs | 6 ++-- zenoh/src/prelude.rs | 12 +++---- zenoh/src/publication.rs | 8 ++--- zenoh/src/query.rs | 6 ++-- zenoh/src/queryable.rs | 10 +++--- zenoh/src/subscriber.rs | 2 +- zenoh/tests/attachments.rs | 2 +- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- 20 files changed, 96 insertions(+), 62 deletions(-) create mode 100644 zenoh/src/api/builders.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 9c54bcf461..315a6bbb27 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,11 +18,11 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::builders::SampleBuilder; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::SampleBuilder; -use zenoh::time::Timestamp; use zenoh::session::Session; +use zenoh::time::Timestamp; pub struct Aligner { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 39e6d34f6b..c89fd94f04 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,9 +23,9 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; +use zenoh::builders::SampleBuilder; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4a9469c835..75386fd907 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,14 +17,14 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; +use zenoh::builders::SampleBuilder; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::builder::SampleBuilder; +use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; -use zenoh::session::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; use crate::ExtractSample; diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 260617cda2..cbb009eaf2 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // use crate::{ + api::sample::DataInfo, encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, - sample::DataInfo, - Payload, Session + Payload, Session, }; use std::{ collections::hash_map::DefaultHasher, diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 1e7cec5380..d7e6259299 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,5 +12,8 @@ // ZettaScale Zenoh Team, // +pub(crate) mod builders; 
pub(crate) mod key_expr; -pub(crate) mod session; \ No newline at end of file +pub(crate) mod sample; +pub(crate) mod session; +pub(crate) mod value; diff --git a/zenoh/src/api/builders.rs b/zenoh/src/api/builders.rs new file mode 100644 index 0000000000..09d12657a5 --- /dev/null +++ b/zenoh/src/api/builders.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub(crate) mod sample; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 5fab36617d..87b2b928ff 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -15,18 +15,18 @@ use std::marker::PhantomData; #[cfg(feature = "unstable")] -use crate::sample::Attachment; -use crate::sample::QoS; -use crate::sample::QoSBuilder; +use crate::api::sample::Attachment; +use crate::api::sample::QoS; +use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] -use crate::sample::SourceInfo; +use crate::api::sample::SourceInfo; +use crate::api::value::Value; use crate::Encoding; use crate::KeyExpr; use crate::Payload; use crate::Priority; use crate::Sample; use crate::SampleKind; -use crate::Value; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 6e457578a3..649a375b1a 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,21 +13,22 @@ // //! 
Sample primitives +use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::api::key_expr::KeyExpr; +use crate::api::value::Value; use crate::encoding::Encoding; use crate::payload::Payload; -use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] +pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +#[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; -pub mod builder; - pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. @@ -154,7 +155,7 @@ pub struct SourceInfo { #[cfg(feature = "unstable")] fn source_info_stack_size() { use crate::{ - sample::{SourceInfo, SourceSn}, + api::sample::{SourceInfo, SourceSn}, ZenohId, }; @@ -467,9 +468,6 @@ impl TryFrom for SampleKind { } } -#[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; - /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. 
pub struct SampleFields { pub key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 89cd249bdb..a197253997 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -12,12 +12,18 @@ // ZettaScale Zenoh Team, // use crate::admin; +use crate::api::key_expr::KeyExprInner; +#[cfg(feature = "unstable")] +use crate::api::sample::Attachment; +use crate::api::sample::DataInfo; +use crate::api::sample::DataInfoIntoSample; +use crate::api::sample::QoS; +use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; -use crate::api::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; @@ -29,11 +35,6 @@ use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; -#[cfg(feature = "unstable")] -use crate::sample::Attachment; -use crate::sample::DataInfo; -use crate::sample::DataInfoIntoSample; -use crate::sample::QoS; use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; use crate::Id; @@ -42,9 +43,7 @@ use crate::Sample; use crate::SampleKind; use crate::Selector; use crate::SourceInfo; -use crate::Value; use log::{error, trace, warn}; -use zenoh_core::Resolvable; use std::collections::HashMap; use std::convert::TryFrom; use std::convert::TryInto; @@ -59,6 +58,7 @@ use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; +use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 92a87cb6c5..a225f2b3d8 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -13,7 
+13,8 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; +use crate::api::builders::sample::ValueBuilderTrait; +use crate::{encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2f1beb5413..a789fc17a9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -121,11 +121,19 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +pub mod builders { + pub use crate::api::builders::sample::QoSBuilderTrait; + pub use crate::api::builders::sample::SampleBuilder; + pub use crate::api::builders::sample::SampleBuilderTrait; + pub use crate::api::builders::sample::TimestampBuilderTrait; + pub use crate::api::builders::sample::ValueBuilderTrait; +} + pub mod key_expr { - pub use crate::api::key_expr::keyexpr; - pub use crate::api::key_expr::OwnedKeyExpr; pub use crate::api::key_expr::kedefine; pub use crate::api::key_expr::keformat; + pub use crate::api::key_expr::keyexpr; + pub use crate::api::key_expr::OwnedKeyExpr; // keyexpr format macro support pub mod format { pub use crate::api::key_expr::format::*; @@ -136,11 +144,22 @@ pub mod key_expr { } pub mod session { - pub use crate::api::session::open; pub use crate::api::session::init; + pub use crate::api::session::open; pub use crate::api::session::Session; - pub use crate::api::session::SessionRef; pub use crate::api::session::SessionDeclarations; + pub use crate::api::session::SessionRef; +} + +pub mod sample { + pub use crate::api::sample::Attachment; + pub use crate::api::sample::Locality; + pub use crate::api::sample::Sample; + pub use crate::api::sample::SampleKind; +} + +pub mod value { + pub use crate::api::value::Value; } mod admin; @@ -163,9 +182,7 @@ pub mod prelude; pub mod publication; pub mod query; pub mod queryable; -pub mod sample; 
pub mod subscriber; -pub mod value; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; @@ -234,4 +251,4 @@ where config: config.try_into().map_err(|e| e.into()), handler: DefaultHandler, } -} \ No newline at end of file +} diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 148d9eddab..26ba22621e 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,16 +12,16 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; -use crate::encoding::Encoding; +use crate::api::builders::sample::ValueBuilderTrait; use crate::api::key_expr::KeyExpr; +use crate::api::value::Value; +use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample::builder::ValueBuilderTrait; -use crate::value::Value; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d059a82afc..4aff9654cc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,25 +43,25 @@ pub(crate) mod common { pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::value::Value; pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. 
pub use crate::payload::{Deserialize, Payload, Serialize}; - pub use crate::value::Value; #[zenoh_macros::unstable] - pub use crate::sample::Locality; + pub use crate::api::sample::Locality; + #[zenoh_macros::unstable] + pub use crate::api::sample::SourceInfo; + pub use crate::api::sample::{Sample, SampleKind}; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - #[zenoh_macros::unstable] - pub use crate::sample::SourceInfo; - pub use crate::sample::{Sample, SampleKind}; pub use crate::publication::Priority; #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::sample::builder::{ + pub use crate::api::builders::sample::{ QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f634a14dd1..3f528b41b7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,13 +13,13 @@ // //! Publishing primitives. -use crate::net::primitives::Primitives; -use crate::prelude::*; #[zenoh_macros::unstable] -use crate::sample::Attachment; -use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; +use crate::api::sample::Attachment; +use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; +use crate::net::primitives::Primitives; +use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ handlers::{Callback, DefaultHandler, IntoHandler}, diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index cb1116130d..b1dd5a5d73 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,11 +13,11 @@ // //! Query primitives. 
+#[zenoh_macros::unstable] +use crate::api::sample::Attachment; +use crate::api::sample::QoSBuilder; use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; -use crate::sample::QoSBuilder; use crate::Session; use std::collections::HashMap; use std::future::Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 447dfc81b6..50190ff891 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -14,17 +14,17 @@ //! Queryable primitives. +use crate::api::builders::sample::SampleBuilder; +use crate::api::sample::{QoSBuilder, SourceInfo}; +use crate::api::session::SessionRef; +use crate::api::session::Undeclarable; use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::SampleBuilder; -use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; -use crate::api::session::SessionRef; -use crate::api::session::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; +use crate::{api::sample::Attachment, query::ReplyKeyExpr}; use std::fmt; use std::future::Ready; use std::ops::Deref; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 64f8d5e026..239ea488a9 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -14,10 +14,10 @@ //! Subscribing primitives. 
use crate::api::key_expr::KeyExpr; +use crate::api::sample::Sample; use crate::api::session::Undeclarable; use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::prelude::Locality; -use crate::sample::Sample; use crate::Id; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 9fb99b7cc0..52508cf27e 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -61,7 +61,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; + use zenoh::{builders::SampleBuilderTrait, prelude::sync::*, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index f64784399c..2eeee9c9df 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; +use zenoh::builders::QoSBuilderTrait; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 1031630a68..b3c6758ec3 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,9 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh::builders::QoSBuilderTrait; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; From 5735bf31cb47cb2748a53c4d54b22668cf76e4e9 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:53:13 +0200 Subject: [PATCH 105/598] 
encoding moved to api --- zenoh/src/admin.rs | 2 +- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/encoding.rs | 0 zenoh/src/api/sample.rs | 2 +- zenoh/src/api/session.rs | 2 +- zenoh/src/api/value.rs | 2 +- zenoh/src/lib.rs | 5 ++++- zenoh/src/prelude.rs | 2 +- 8 files changed, 10 insertions(+), 6 deletions(-) rename zenoh/src/{ => api}/encoding.rs (100%) diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index cbb009eaf2..f7ddb69a37 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use crate::{ + api::encoding::Encoding, api::sample::DataInfo, - encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index d7e6259299..b12a623235 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -13,6 +13,7 @@ // pub(crate) mod builders; +pub(crate) mod encoding; pub(crate) mod key_expr; pub(crate) mod sample; pub(crate) mod session; diff --git a/zenoh/src/encoding.rs b/zenoh/src/api/encoding.rs similarity index 100% rename from zenoh/src/encoding.rs rename to zenoh/src/api/encoding.rs diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 649a375b1a..4a4bc934a8 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -14,9 +14,9 @@ //! 
Sample primitives use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::value::Value; -use crate::encoding::Encoding; use crate::payload::Payload; use crate::time::Timestamp; use crate::Priority; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a197253997..0a9e34f61b 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use crate::admin; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExprInner; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; @@ -21,7 +22,6 @@ use crate::api::sample::QoS; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; -use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index a225f2b3d8..f75abd4241 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -14,7 +14,7 @@ //! Value primitives. use crate::api::builders::sample::ValueBuilderTrait; -use crate::{encoding::Encoding, payload::Payload}; +use crate::{api::encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a789fc17a9..e4a90ccac8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -162,6 +162,10 @@ pub mod value { pub use crate::api::value::Value; } +pub mod encoding { + pub use crate::api::encoding::Encoding; +} + mod admin; #[macro_use] @@ -171,7 +175,6 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; -pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 4aff9654cc..61c21b9167 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,8 +43,8 @@ pub(crate) mod common { pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; - pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. pub use crate::payload::{Deserialize, Payload, Serialize}; From b11ca7595d0a6f6d9725fa7a009cef7dc35dee18 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:56:30 +0200 Subject: [PATCH 106/598] payload moved to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/payload.rs | 0 zenoh/src/lib.rs | 9 ++++++++- 3 files changed, 9 insertions(+), 1 deletion(-) rename zenoh/src/{ => api}/payload.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index b12a623235..45f04bf6f4 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -15,6 +15,7 @@ pub(crate) mod builders; pub(crate) mod encoding; pub(crate) mod key_expr; +pub(crate) mod payload; pub(crate) mod sample; pub(crate) mod session; pub(crate) mod value; diff --git a/zenoh/src/payload.rs b/zenoh/src/api/payload.rs similarity index 100% rename from zenoh/src/payload.rs rename to zenoh/src/api/payload.rs diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e4a90ccac8..3e286df035 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -166,6 +166,14 @@ pub mod encoding { pub use crate::api::encoding::Encoding; } +pub mod payload { + pub use crate::api::payload::Deserialize; + pub use crate::api::payload::Payload; + pub use crate::api::payload::PayloadReader; + pub use crate::api::payload::Serialize; + pub use crate::api::payload::StringOrBase64; +} + mod admin; #[macro_use] @@ -179,7 +187,6 @@ pub mod handlers; pub mod 
info; #[cfg(feature = "unstable")] pub mod liveliness; -pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; From 1574e1c7c79f0bb3f92a4e64ee05dd643ceca261 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:02:42 +0200 Subject: [PATCH 107/598] selector moved to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/selector.rs | 0 zenoh/src/api/session.rs | 7 ++++--- zenoh/src/lib.rs | 8 +++++++- 4 files changed, 12 insertions(+), 4 deletions(-) rename zenoh/src/{ => api}/selector.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 45f04bf6f4..068ff6d3d0 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -17,5 +17,6 @@ pub(crate) mod encoding; pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod sample; +pub(crate) mod selector; pub(crate) mod session; pub(crate) mod value; diff --git a/zenoh/src/selector.rs b/zenoh/src/api/selector.rs similarity index 100% rename from zenoh/src/selector.rs rename to zenoh/src/api/selector.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 0a9e34f61b..f50458102e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,12 +13,16 @@ // use crate::admin; use crate::api::encoding::Encoding; +use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; use crate::api::sample::DataInfoIntoSample; use crate::api::sample::QoS; +use crate::api::selector::Parameters; +use crate::api::selector::Selector; +use crate::api::selector::TIME_RANGE_KEY; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; @@ -31,17 +35,14 @@ use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; use crate::payload::Payload; use crate::prelude::Locality; -use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use 
crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; use crate::Id; use crate::Priority; use crate::Sample; use crate::SampleKind; -use crate::Selector; use crate::SourceInfo; use log::{error, trace, warn}; use std::collections::HashMap; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3e286df035..81aaa1545b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -174,13 +174,19 @@ pub mod payload { pub use crate::api::payload::StringOrBase64; } +pub mod selector { + pub use crate::api::selector::Parameter; + pub use crate::api::selector::Parameters; + pub use crate::api::selector::Selector; + pub use crate::api::selector::TIME_RANGE_KEY; +} + mod admin; #[macro_use] mod api; pub(crate) mod net; pub use net::runtime; -pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; pub mod handlers; From 9659ab3edfc94d0840cf024117e7b1497396bd12 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:07:35 +0200 Subject: [PATCH 108/598] info moved to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/info.rs | 0 zenoh/src/api/session.rs | 2 +- zenoh/src/lib.rs | 1 - 4 files changed, 2 insertions(+), 2 deletions(-) rename zenoh/src/{ => api}/info.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 068ff6d3d0..be7ee42051 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -14,6 +14,7 @@ pub(crate) mod builders; pub(crate) mod encoding; +pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod sample; diff --git a/zenoh/src/info.rs b/zenoh/src/api/info.rs similarity index 100% rename from zenoh/src/info.rs rename to zenoh/src/api/info.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index f50458102e..a4634d9d28 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,6 +13,7 @@ // use crate::admin; use crate::api::encoding::Encoding; +use crate::api::info::SessionInfo; use 
crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; #[cfg(feature = "unstable")] @@ -27,7 +28,6 @@ use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; use crate::handlers::{Callback, DefaultHandler}; -use crate::info::*; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 81aaa1545b..60e1183670 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -190,7 +190,6 @@ pub use net::runtime; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; pub mod handlers; -pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; pub mod plugins; From a8de62e7d41a9fac9debd9f95d9743b507f226d8 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:19:19 +0200 Subject: [PATCH 109/598] subscriber moved to api --- zenoh/src/{ => api}/subscriber.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/subscriber.rs (100%) diff --git a/zenoh/src/subscriber.rs b/zenoh/src/api/subscriber.rs similarity index 100% rename from zenoh/src/subscriber.rs rename to zenoh/src/api/subscriber.rs From 526c1b4fe2b8dacc24194d881d5a0c50f6c6d767 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:19:29 +0200 Subject: [PATCH 110/598] subscriber moved to api --- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api.rs | 1 + zenoh/src/api/session.rs | 4 +++- zenoh/src/lib.rs | 21 +++++++++++-------- zenoh/src/liveliness.rs | 5 +++-- zenoh/tests/attachments.rs | 2 +- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- 10 files changed, 25 insertions(+), 18 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 
315a6bbb27..23bf066263 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,9 +18,9 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; -use zenoh::builders::SampleBuilder; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample::SampleBuilder; use zenoh::session::Session; use zenoh::time::Timestamp; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c89fd94f04..1abe311b65 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,9 +23,9 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::builders::SampleBuilder; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; +use zenoh::sample::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 75386fd907..24501f9eca 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,10 +17,10 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; -use zenoh::builders::SampleBuilder; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; +use zenoh::sample::SampleBuilder; use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 
be7ee42051..a9f08ed21c 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -20,4 +20,5 @@ pub(crate) mod payload; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; +pub(crate) mod subscriber; pub(crate) mod value; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a4634d9d28..96ddd63ee6 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -24,6 +24,8 @@ use crate::api::sample::QoS; use crate::api::selector::Parameters; use crate::api::selector::Selector; use crate::api::selector::TIME_RANGE_KEY; +use crate::api::subscriber::SubscriberBuilder; +use crate::api::subscriber::SubscriberState; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; @@ -38,7 +40,6 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::subscriber::*; use crate::Id; use crate::Priority; use crate::Sample; @@ -61,6 +62,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; +use zenoh_protocol::core::Reliability; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::network::AtomicRequestId; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 60e1183670..c762009209 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -121,14 +121,6 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; -pub mod builders { - pub use crate::api::builders::sample::QoSBuilderTrait; - pub use crate::api::builders::sample::SampleBuilder; - pub use crate::api::builders::sample::SampleBuilderTrait; - pub use crate::api::builders::sample::TimestampBuilderTrait; - pub use crate::api::builders::sample::ValueBuilderTrait; -} - pub mod key_expr { pub use crate::api::key_expr::kedefine; pub use crate::api::key_expr::keformat; @@ -152,6 
+144,11 @@ pub mod session { } pub mod sample { + pub use crate::api::builders::sample::QoSBuilderTrait; + pub use crate::api::builders::sample::SampleBuilder; + pub use crate::api::builders::sample::SampleBuilderTrait; + pub use crate::api::builders::sample::TimestampBuilderTrait; + pub use crate::api::builders::sample::ValueBuilderTrait; pub use crate::api::sample::Attachment; pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; @@ -181,6 +178,13 @@ pub mod selector { pub use crate::api::selector::TIME_RANGE_KEY; } +pub mod subscriber { + pub use crate::api::subscriber::FlumeSubscriber; + pub use crate::api::subscriber::Reliability; + pub use crate::api::subscriber::Subscriber; + pub use crate::api::subscriber::SubscriberBuilder; +} + mod admin; #[macro_use] @@ -197,7 +201,6 @@ pub mod prelude; pub mod publication; pub mod query; pub mod queryable; -pub mod subscriber; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 8ce5386c3f..b40b786dad 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -22,11 +22,12 @@ use crate::{query::Reply, Id}; #[zenoh_macros::unstable] use { crate::{ + api::session::SessionRef, + api::session::Undeclarable, + api::subscriber::{Subscriber, SubscriberInner}, handlers::locked, handlers::DefaultHandler, prelude::*, - subscriber::{Subscriber, SubscriberInner}, - api::session::SessionRef, api::session::Undeclarable, }, std::convert::TryInto, std::future::Ready, diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 52508cf27e..7580984c8d 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -61,7 +61,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{builders::SampleBuilderTrait, prelude::sync::*, sample::Attachment}; + use zenoh::{prelude::sync::*, sample::Attachment, sample::SampleBuilderTrait}; let zenoh = 
zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 2eeee9c9df..b9f3ab3945 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::builders::QoSBuilderTrait; use zenoh::prelude::r#async::*; +use zenoh::sample::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index b3c6758ec3..9803d62c4e 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,9 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::builders::QoSBuilderTrait; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; +use zenoh::sample::QoSBuilderTrait; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; From cd687d8e17386e17d950a80612f0414f98f0423f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:26:10 +0200 Subject: [PATCH 111/598] publisher to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/publication.rs | 0 zenoh/src/api/session.rs | 5 +++++ zenoh/src/lib.rs | 10 +++++++++- 4 files changed, 15 insertions(+), 1 deletion(-) rename zenoh/src/{ => api}/publication.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index a9f08ed21c..259547740c 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -17,6 +17,7 @@ pub(crate) mod encoding; pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; +pub(crate) mod publication; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; diff --git a/zenoh/src/publication.rs b/zenoh/src/api/publication.rs similarity index 100% rename from zenoh/src/publication.rs rename to zenoh/src/api/publication.rs diff --git 
a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 96ddd63ee6..5a780d51f0 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -16,6 +16,11 @@ use crate::api::encoding::Encoding; use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; +use crate::api::publication::MatchingListenerState; +use crate::api::publication::MatchingStatus; +use crate::api::publication::PublicationBuilder; +use crate::api::publication::PublicationBuilderDelete; +use crate::api::publication::PublicationBuilderPut; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c762009209..140717dd1c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -185,6 +185,15 @@ pub mod subscriber { pub use crate::api::subscriber::SubscriberBuilder; } +pub mod publication { + pub use crate::api::publication::CongestionControl; + pub use crate::api::publication::Priority; + pub use crate::api::publication::Publisher; + pub use crate::api::publication::PublisherBuilder; + #[zenoh_macros::unstable] + pub use crate::api::publication::PublisherDeclarations; +} + mod admin; #[macro_use] @@ -198,7 +207,6 @@ pub mod handlers; pub mod liveliness; pub mod plugins; pub mod prelude; -pub mod publication; pub mod query; pub mod queryable; #[cfg(feature = "shared-memory")] From 22a938965cd77ec4810e2b08a6360fc98d68995a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:37:45 +0200 Subject: [PATCH 112/598] query to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/query.rs | 0 zenoh/src/api/selector.rs | 5 +++-- zenoh/src/api/session.rs | 5 ++++- zenoh/src/lib.rs | 9 ++++++++- zenoh/src/liveliness.rs | 2 +- zenoh/src/queryable.rs | 4 ++-- 7 files changed, 19 insertions(+), 7 deletions(-) rename zenoh/src/{ => api}/query.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 259547740c..5af3b8ba45 100644 --- 
a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -18,6 +18,7 @@ pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod publication; +pub(crate) mod query; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; diff --git a/zenoh/src/query.rs b/zenoh/src/api/query.rs similarity index 100% rename from zenoh/src/query.rs rename to zenoh/src/api/query.rs diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index df562e196b..d93a61c4fd 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -214,7 +214,7 @@ impl<'a> Selector<'a> { } #[cfg(any(feature = "unstable", test))] pub(crate) fn accept_any_keyexpr(self, any: bool) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; let mut s = self.into_owned(); let any_selparam = s.parameter_index(_REPLY_KEY_EXPR_ANY_SEL_PARAM)?; match (any, any_selparam) { @@ -264,7 +264,8 @@ fn selector_accessors() { map_selector.time_range().unwrap() ); let without_any = selector.to_string(); - let with_any = selector.to_string() + "&" + crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + let with_any = + selector.to_string() + "&" + crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; selector = selector.accept_any_keyexpr(false).unwrap(); assert_eq!(selector.to_string(), without_any); selector = selector.accept_any_keyexpr(true).unwrap(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5a780d51f0..71f4100951 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -21,6 +21,9 @@ use crate::api::publication::MatchingStatus; use crate::api::publication::PublicationBuilder; use crate::api::publication::PublicationBuilderDelete; use crate::api::publication::PublicationBuilderPut; +use crate::api::query::GetBuilder; +use crate::api::query::QueryState; +use crate::api::query::Reply; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use 
crate::api::sample::DataInfo; @@ -2178,7 +2181,7 @@ impl Primitives for Session { query .selector .parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), + .get_bools([crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), Ok([true]) ) && !query.selector.key_expr.intersects(&key_expr) { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 140717dd1c..8f509f1b0a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -194,6 +194,14 @@ pub mod publication { pub use crate::api::publication::PublisherDeclarations; } +pub mod query { + pub use crate::api::query::Mode; + pub use crate::api::query::Reply; + pub use crate::api::query::ReplyKeyExpr; + pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; + pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; +} + mod admin; #[macro_use] @@ -207,7 +215,6 @@ pub mod handlers; pub mod liveliness; pub mod plugins; pub mod prelude; -pub mod query; pub mod queryable; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index b40b786dad..08145e36f1 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -17,7 +17,7 @@ //! 
see [`Liveliness`] use zenoh_protocol::network::request; -use crate::{query::Reply, Id}; +use crate::{api::query::Reply, Id}; #[zenoh_macros::unstable] use { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 50190ff891..f8d8a8a8ab 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -24,7 +24,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::Id; #[cfg(feature = "unstable")] -use crate::{api::sample::Attachment, query::ReplyKeyExpr}; +use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -201,7 +201,7 @@ impl Query { } fn _accepts_any_replies(&self) -> ZResult { self.parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]) + .get_bools([crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]) .map(|a| a[0]) } } From 7bc561d4b60835fe1cd3e430aecd7fe4484ed594 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:46:10 +0200 Subject: [PATCH 113/598] queryable added to api --- zenoh-ext/src/publication_cache.rs | 2 +- zenoh/src/admin.rs | 2 +- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/queryable.rs | 4 ++-- zenoh/src/api/selector.rs | 2 +- zenoh/src/api/session.rs | 3 +++ zenoh/src/lib.rs | 7 ++++++- zenoh/src/net/runtime/adminspace.rs | 4 ++-- 8 files changed, 17 insertions(+), 8 deletions(-) rename zenoh/src/{ => api}/queryable.rs (99%) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index fdba3af231..b8b7c79cec 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -17,8 +17,8 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; -use zenoh::subscriber::FlumeSubscriber; use zenoh::session::SessionRef; +use zenoh::subscriber::FlumeSubscriber; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_result::{bail, ZResult}; use 
zenoh_util::core::ResolveFuture; diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index f7ddb69a37..678f6d1bbb 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -13,10 +13,10 @@ // use crate::{ api::encoding::Encoding, + api::queryable::Query, api::sample::DataInfo, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, - queryable::Query, Payload, Session, }; use std::{ diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 5af3b8ba45..f7049b4106 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -19,6 +19,7 @@ pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod publication; pub(crate) mod query; +pub(crate) mod queryable; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; diff --git a/zenoh/src/queryable.rs b/zenoh/src/api/queryable.rs similarity index 99% rename from zenoh/src/queryable.rs rename to zenoh/src/api/queryable.rs index f8d8a8a8ab..db4df63183 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -251,8 +251,8 @@ impl AsyncResolve for ReplySample<'_> { #[derive(Debug)] pub struct ReplyBuilderPut { - payload: super::Payload, - encoding: super::Encoding, + payload: Payload, + encoding: Encoding, } #[derive(Debug)] pub struct ReplyBuilderDelete; diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index d93a61c4fd..51b8296634 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -18,7 +18,7 @@ use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_result::ZResult; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; -use crate::{api::key_expr::KeyExpr, queryable::Query}; +use crate::{api::key_expr::KeyExpr, api::queryable::Query}; use std::{ borrow::{Borrow, Cow}, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 71f4100951..519b2c0011 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -24,6 +24,9 @@ use crate::api::publication::PublicationBuilderPut; use 
crate::api::query::GetBuilder; use crate::api::query::QueryState; use crate::api::query::Reply; +use crate::api::queryable::Query; +use crate::api::queryable::QueryInner; +use crate::api::queryable::QueryableState; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8f509f1b0a..a2df421de8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -202,6 +202,12 @@ pub mod query { pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; } +pub mod queryable { + pub use crate::api::queryable::Query; + pub use crate::api::queryable::Queryable; + pub use crate::api::queryable::QueryableBuilder; +} + mod admin; #[macro_use] @@ -215,7 +221,6 @@ pub mod handlers; pub mod liveliness; pub mod plugins; pub mod prelude; -pub mod queryable; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 26ba22621e..708d2bb349 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -14,14 +14,14 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; use crate::api::key_expr::KeyExpr; +use crate::api::queryable::Query; +use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; -use crate::queryable::Query; -use crate::queryable::QueryInner; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; From 98ea8aeeeaa2588c04575d298ea0c73409c2bfe7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 14:58:07 +0200 Subject: [PATCH 114/598] handlers to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/handlers.rs | 0 zenoh/src/api/publication.rs | 4 
++-- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 4 ++-- zenoh/src/api/session.rs | 2 +- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 10 ++++++++-- zenoh/src/liveliness.rs | 4 ++-- zenoh/src/scouting.rs | 3 +-- 10 files changed, 19 insertions(+), 13 deletions(-) rename zenoh/src/{ => api}/handlers.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index f7049b4106..96b733dd5c 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -14,6 +14,7 @@ pub(crate) mod builders; pub(crate) mod encoding; +pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; diff --git a/zenoh/src/handlers.rs b/zenoh/src/api/handlers.rs similarity index 100% rename from zenoh/src/handlers.rs rename to zenoh/src/api/handlers.rs diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 3f528b41b7..3e650c5e39 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -22,7 +22,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ - handlers::{Callback, DefaultHandler, IntoHandler}, + api::handlers::{Callback, DefaultHandler, IntoHandler}, Id, }; use std::future::Ready; @@ -1238,7 +1238,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { where CallbackMut: FnMut(MatchingStatus) + Send + Sync + 'static, { - self.callback(crate::handlers::locked(callback)) + self.callback(crate::api::handlers::locked(callback)) } /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoHandler). diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index b1dd5a5d73..1e749132da 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,10 +13,10 @@ // //! Query primitives. 
+use crate::api::handlers::{locked, Callback, DefaultHandler}; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; -use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; use crate::Session; use std::collections::HashMap; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index db4df63183..df76b6441f 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -15,11 +15,11 @@ //! Queryable primitives. use crate::api::builders::sample::SampleBuilder; +use crate::api::encoding::Encoding; +use crate::api::handlers::{locked, DefaultHandler}; use crate::api::sample::{QoSBuilder, SourceInfo}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; -use crate::encoding::Encoding; -use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::Id; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 519b2c0011..0bcb57d5e7 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,6 +13,7 @@ // use crate::admin; use crate::api::encoding::Encoding; +use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; @@ -40,7 +41,6 @@ use crate::api::subscriber::SubscriberState; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; -use crate::handlers::{Callback, DefaultHandler}; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 239ea488a9..c549542b3b 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -13,10 +13,10 @@ // //! Subscribing primitives. 
+use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::sample::Sample; use crate::api::session::Undeclarable; -use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::prelude::Locality; use crate::Id; use crate::{api::session::SessionRef, Result as ZResult}; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a2df421de8..e8622045a7 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -81,8 +81,8 @@ extern crate zenoh_result; pub(crate) type Id = u32; +use api::handlers::DefaultHandler; use git_version::git_version; -use handlers::DefaultHandler; #[cfg(feature = "unstable")] use prelude::*; use scouting::ScoutBuilder; @@ -208,6 +208,13 @@ pub mod queryable { pub use crate::api::queryable::QueryableBuilder; } +pub mod handlers { + pub use crate::api::handlers::locked; + pub use crate::api::handlers::DefaultHandler; + pub use crate::api::handlers::IntoHandler; + pub use crate::api::handlers::RingBuffer; +} + mod admin; #[macro_use] @@ -216,7 +223,6 @@ pub(crate) mod net; pub use net::runtime; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; -pub mod handlers; #[cfg(feature = "unstable")] pub mod liveliness; pub mod plugins; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 08145e36f1..dac046324d 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -22,11 +22,11 @@ use crate::{api::query::Reply, Id}; #[zenoh_macros::unstable] use { crate::{ + api::handlers::locked, + api::handlers::DefaultHandler, api::session::SessionRef, api::session::Undeclarable, api::subscriber::{Subscriber, SubscriberInner}, - handlers::locked, - handlers::DefaultHandler, prelude::*, }, std::convert::TryInto, diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index 49f2b4c01f..bfebc09d2c 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::handlers::{locked, Callback, DefaultHandler}; +use crate::api::handlers::{locked, Callback, DefaultHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; - use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; From c8338f8748a49fea9a2b7c0e5db8df5fe499ff75 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 17:48:42 +0200 Subject: [PATCH 115/598] scouting to api --- zenoh/src/{ => api}/scouting.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/scouting.rs (100%) diff --git a/zenoh/src/scouting.rs b/zenoh/src/api/scouting.rs similarity index 100% rename from zenoh/src/scouting.rs rename to zenoh/src/api/scouting.rs From 36150cbf977ff66066f13450dba2e05274790a24 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 17:48:54 +0200 Subject: [PATCH 116/598] scouting to api --- examples/examples/z_scout.rs | 3 +- zenoh/src/api.rs | 1 + zenoh/src/api/scouting.rs | 47 ++++++++++++++++++++++++++++-- zenoh/src/lib.rs | 56 ++++-------------------------------- 
zenoh/src/prelude.rs | 6 ++-- zenoh/tests/formatters.rs | 8 +++--- 6 files changed, 61 insertions(+), 60 deletions(-) diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index bc778cfc0f..11ed3a6fd8 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -14,6 +14,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::scouting::WhatAmI; +use zenoh::scouting::scout; #[tokio::main] async fn main() { @@ -21,7 +22,7 @@ async fn main() { env_logger::init(); println!("Scouting..."); - let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) + let receiver = scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) .res() .await .unwrap(); diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 96b733dd5c..bc5b6a9301 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -22,6 +22,7 @@ pub(crate) mod publication; pub(crate) mod query; pub(crate) mod queryable; pub(crate) mod sample; +pub(crate) mod scouting; pub(crate) mod selector; pub(crate) mod session; pub(crate) mod subscriber; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index bfebc09d2c..56f8d4c1a4 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -169,7 +169,7 @@ where { fn res_sync(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); - scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) + _scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } @@ -294,7 +294,7 @@ impl Scout { } } -fn scout( +fn _scout( what: WhatAmIMatcher, config: zenoh_config::Config, callback: Callback<'static, Hello>, @@ -336,3 +336,46 @@ fn scout( } Ok(ScoutInner { stop_sender }) } + +/// Scout for routers and/or peers. +/// +/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. +/// +/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. 
+/// +/// # Arguments +/// +/// * `what` - The kind of zenoh process to scout for +/// * `config` - The configuration [`Config`] to use for scouting +/// +/// # Examples +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::scouting::WhatAmI; +/// +/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) +/// .res() +/// .await +/// .unwrap(); +/// while let Ok(hello) = receiver.recv_async().await { +/// println!("{}", hello); +/// } +/// # } +/// ``` +pub fn scout, TryIntoConfig>( + what: I, + config: TryIntoConfig, +) -> ScoutBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: + Into, +{ + ScoutBuilder { + what: what.into(), + config: config.try_into().map_err(|e| e.into()), + handler: DefaultHandler, + } +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e8622045a7..1129363c43 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -81,13 +81,9 @@ extern crate zenoh_result; pub(crate) type Id = u32; -use api::handlers::DefaultHandler; use git_version::git_version; #[cfg(feature = "unstable")] use prelude::*; -use scouting::ScoutBuilder; -pub use zenoh_macros::{ke, kedefine, keformat, kewrite}; -use zenoh_protocol::core::WhatAmIMatcher; use zenoh_util::concat_enabled_features; /// A zenoh error. @@ -215,6 +211,12 @@ pub mod handlers { pub use crate::api::handlers::RingBuffer; } +pub mod scouting { + pub use crate::api::scouting::scout; + pub use crate::api::scouting::ScoutBuilder; + pub use crate::api::scouting::WhatAmI; +} + mod admin; #[macro_use] @@ -250,49 +252,3 @@ pub mod time { Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) } } - -/// Scouting primitives. -pub mod scouting; - -/// Scout for routers and/or peers. -/// -/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. 
-/// -/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. -/// -/// # Arguments -/// -/// * `what` - The kind of zenoh process to scout for -/// * `config` - The configuration [`Config`] to use for scouting -/// -/// # Examples -/// ```no_run -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; -/// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() -/// .await -/// .unwrap(); -/// while let Ok(hello) = receiver.recv_async().await { -/// println!("{}", hello); -/// } -/// # } -/// ``` -pub fn scout, TryIntoConfig>( - what: I, - config: TryIntoConfig, -) -> ScoutBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: - Into, -{ - ScoutBuilder { - what: what.into(), - config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler, - } -} diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 61c21b9167..317cad3a68 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -41,7 +41,7 @@ pub(crate) mod common { pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; - pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; @@ -56,9 +56,9 @@ pub(crate) mod common { #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - pub use crate::publication::Priority; + pub use crate::api::publication::Priority; #[zenoh_macros::unstable] - pub use crate::publication::PublisherDeclarations; + pub use crate::api::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; pub use crate::api::builders::sample::{ diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index 
22600b6cc0..22defaab05 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -13,11 +13,11 @@ // #[test] fn reuse() { - zenoh::kedefine!( + zenoh::key_expr::kedefine!( pub gkeys: "zenoh/${group:*}/${member:*}", ); let mut formatter = gkeys::formatter(); - let k1 = zenoh::keformat!(formatter, group = "foo", member = "bar").unwrap(); + let k1 = zenoh::key_expr::keformat!(formatter, group = "foo", member = "bar").unwrap(); assert_eq!(dbg!(k1).as_str(), "zenoh/foo/bar"); formatter.set("member", "*").unwrap(); @@ -29,8 +29,8 @@ fn reuse() { let k2 = dbg!(&mut formatter).build().unwrap(); assert_eq!(dbg!(k2).as_str(), "zenoh/foo/*"); - let k3 = zenoh::keformat!(formatter, group = "foo", member = "*").unwrap(); + let k3 = zenoh::key_expr::keformat!(formatter, group = "foo", member = "*").unwrap(); assert_eq!(dbg!(k3).as_str(), "zenoh/foo/*"); - zenoh::keformat!(formatter, group = "**", member = "**").unwrap_err(); + zenoh::key_expr::keformat!(formatter, group = "**", member = "**").unwrap_err(); } From b9166141baa645dbdc1a8c3f51cb7efef3ef47d0 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 18:11:34 +0200 Subject: [PATCH 117/598] liveliness to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/liveliness.rs | 0 zenoh/src/api/session.rs | 14 +++++++------- zenoh/src/lib.rs | 6 +++++- 4 files changed, 13 insertions(+), 8 deletions(-) rename zenoh/src/{ => api}/liveliness.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index bc5b6a9301..d06acb8f96 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -17,6 +17,7 @@ pub(crate) mod encoding; pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; +pub(crate) mod liveliness; pub(crate) mod payload; pub(crate) mod publication; pub(crate) mod query; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/api/liveliness.rs similarity index 100% rename from zenoh/src/liveliness.rs rename to zenoh/src/api/liveliness.rs diff --git a/zenoh/src/api/session.rs 
b/zenoh/src/api/session.rs index 0bcb57d5e7..407a6256c3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -17,6 +17,8 @@ use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; +#[zenoh_macros::unstable] +use crate::api::liveliness::{Liveliness, LivelinessTokenState}; use crate::api::publication::MatchingListenerState; use crate::api::publication::MatchingStatus; use crate::api::publication::PublicationBuilder; @@ -41,8 +43,6 @@ use crate::api::subscriber::SubscriberState; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; -#[zenoh_macros::unstable] -use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; @@ -1030,7 +1030,7 @@ impl Session { let declared_sub = origin != Locality::SessionLocal && !key_expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS); + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS); let declared_sub = declared_sub @@ -1160,7 +1160,7 @@ impl Session { && !sub_state .key_expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS); + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS); if send_forget { // Note: there might be several Subscribers on the same KeyExpr. // Before calling forget_subscriber(key_expr), check if this was the last one. 
@@ -1270,7 +1270,7 @@ impl Session { let mut state = zwrite!(self.state); log::trace!("declare_liveliness({:?})", key_expr); let id = self.runtime.next_id(); - let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); + let key_expr = KeyExpr::from(*crate::api::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { id, key_expr: key_expr.clone().into_owned(), @@ -2018,7 +2018,7 @@ impl Primitives for Session { if expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) { drop(state); self.handle_data( @@ -2047,7 +2047,7 @@ impl Primitives for Session { if expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) { drop(state); let data_info = DataInfo { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 1129363c43..e15e5fdd33 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -217,6 +217,11 @@ pub mod scouting { pub use crate::api::scouting::WhatAmI; } +pub mod liveliness { + pub use crate::api::liveliness::Liveliness; + pub use crate::api::liveliness::LivelinessSubscriberBuilder; +} + mod admin; #[macro_use] @@ -226,7 +231,6 @@ pub use net::runtime; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; #[cfg(feature = "unstable")] -pub mod liveliness; pub mod plugins; pub mod prelude; #[cfg(feature = "shared-memory")] From a92da3584934b89b71fc0f12450eb5b676e482a1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 22:09:52 +0200 Subject: [PATCH 118/598] time to api --- zenoh/src/api.rs | 1 + zenoh/src/api/sample.rs | 2 +- zenoh/src/api/time.rs | 26 ++++++++++++++++++++++++++ zenoh/src/lib.rs | 22 +++++----------------- 4 files changed, 33 insertions(+), 18 deletions(-) create mode 100644 zenoh/src/api/time.rs diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index d06acb8f96..19ea8afaf2 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -27,4 +27,5 @@ pub(crate) mod scouting; pub(crate) mod selector; pub(crate) mod session; pub(crate) mod subscriber; +pub(crate) mod time; pub(crate) mod value; diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 4a4bc934a8..148b61b9e2 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -18,7 +18,6 @@ use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::value::Value; use crate::payload::Payload; -use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; @@ -26,6 +25,7 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::core::Timestamp; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs new file mode 100644 index 0000000000..eeeebdc6ba --- /dev/null +++ b/zenoh/src/api/time.rs @@ -0,0 +1,26 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// 
terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::convert::TryFrom; + +use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; + +/// Generates a reception [`Timestamp`] with id=0x01. +/// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) +/// that doesn't contain any timestamp. +pub fn new_reception_timestamp() -> Timestamp { + use std::time::{SystemTime, UNIX_EPOCH}; + + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e15e5fdd33..40c7a942bc 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -222,6 +222,11 @@ pub mod liveliness { pub use crate::api::liveliness::LivelinessSubscriberBuilder; } +pub mod time { + pub use crate::api::time::new_reception_timestamp; + pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; +} + mod admin; #[macro_use] @@ -239,20 +244,3 @@ pub use zenoh_shm as shm; /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub use zenoh_buffers as buffers; - -/// Time related types and functions. -pub mod time { - use std::convert::TryFrom; - - pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; - - /// Generates a reception [`Timestamp`] with id=0x01. - /// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) - /// that doesn't contain any timestamp. 
- pub fn new_reception_timestamp() -> Timestamp { - use std::time::{SystemTime, UNIX_EPOCH}; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) - } -} From 60a9003cecb4592099ef3b51f4279a5c396eae22 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 22:16:02 +0200 Subject: [PATCH 119/598] admin to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/admin.rs | 0 zenoh/src/api/session.rs | 2 +- zenoh/src/api/time.rs | 2 +- zenoh/src/lib.rs | 3 --- 5 files changed, 3 insertions(+), 5 deletions(-) rename zenoh/src/{ => api}/admin.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 19ea8afaf2..1af7da37c5 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // +pub(crate) mod admin; pub(crate) mod builders; pub(crate) mod encoding; pub(crate) mod handlers; diff --git a/zenoh/src/admin.rs b/zenoh/src/api/admin.rs similarity index 100% rename from zenoh/src/admin.rs rename to zenoh/src/api/admin.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 407a6256c3..83a57ce260 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::admin; +use crate::api::admin; use crate::api::encoding::Encoding; use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index eeeebdc6ba..cbdabe3a7e 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -13,7 +13,7 @@ // use std::convert::TryFrom; -use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; +use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a reception [`Timestamp`] with id=0x01. 
/// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 40c7a942bc..3a5c358ae2 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -227,9 +227,6 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } -mod admin; -#[macro_use] - mod api; pub(crate) mod net; pub use net::runtime; From e816f4efdc8591ce4db6ea9c88cc2109e1c037d4 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Apr 2024 11:36:02 +0200 Subject: [PATCH 120/598] Add unicast open/close time tests (#898) * Add unicast open/close time * Remove unused import * Add print to tests --- io/zenoh-transport/tests/unicast_time.rs | 521 +++++++++++++++++++++++ 1 file changed, 521 insertions(+) create mode 100644 io/zenoh-transport/tests/unicast_time.rs diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs new file mode 100644 index 0000000000..75d3ae1d98 --- /dev/null +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -0,0 +1,521 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + convert::TryFrom, + sync::Arc, + time::{Duration, Instant}, +}; +use zenoh_core::ztimeout; +use zenoh_link::EndPoint; +use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_result::ZResult; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); +const SLEEP: Duration = Duration::from_millis(100); + +macro_rules! ztimeout_expected { + ($f:expr) => { + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() + }; +} + +#[cfg(test)] +#[derive(Default)] +struct SHRouterOpenClose; + +impl TransportEventHandler for SHRouterOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } +} + +// Transport Handler for the client +struct SHClientOpenClose {} + +impl SHClientOpenClose { + fn new() -> Self { + Self {} + } +} + +impl TransportEventHandler for SHClientOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } +} + +async fn time_transport( + listen_endpoint: &EndPoint, + connect_endpoint: &EndPoint, + lowlatency_transport: bool, +) { + if lowlatency_transport { + println!(">>> Low latency transport"); + } else { + println!(">>> Universal transport"); + } + /* [ROUTER] */ + let router_id = 
ZenohId::try_from([1]).unwrap(); + + let router_handler = Arc::new(SHRouterOpenClose); + // Create the router transport manager + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + 1, + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .max_sessions(1); + let router_manager = TransportManager::builder() + .whatami(WhatAmI::Router) + .zid(router_id) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + /* [CLIENT] */ + let client01_id = ZenohId::try_from([2]).unwrap(); + + // Create the transport transport manager for the first client + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + 1, + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .max_sessions(1); + let client01_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client01_id) + .unicast(unicast) + .build(Arc::new(SHClientOpenClose::new())) + .unwrap(); + + /* [1] */ + // Add the locator on the router + let start = Instant::now(); + ztimeout!(router_manager.add_listener(listen_endpoint.clone())).unwrap(); + println!("Add listener {}: {:#?}", listen_endpoint, start.elapsed()); + + // Open a transport from the client to the router + let start = Instant::now(); + let c_ses1 = + ztimeout_expected!(client01_manager.open_transport_unicast(connect_endpoint.clone())) + .unwrap(); + println!( + "Open transport {}: {:#?}", + connect_endpoint, + start.elapsed() + ); + + // Verify that the transport has been open on the router + ztimeout!(async { + loop { + let transports = ztimeout!(router_manager.get_transports_unicast()); + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id); + + match s { + Some(s) => { + let links = s.get_links().unwrap(); + assert_eq!(links.len(), 1); + break; + } + None => tokio::time::sleep(SLEEP).await, + } + } + }); + + /* [2] */ + // Close the open transport on the client + let start = Instant::now(); + 
ztimeout!(c_ses1.close()).unwrap(); + println!( + "Close transport {}: {:#?}", + connect_endpoint, + start.elapsed() + ); + + // Verify that the transport has been closed also on the router + ztimeout!(async { + loop { + let transports = ztimeout!(router_manager.get_transports_unicast()); + let index = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id); + if index.is_none() { + break; + } + tokio::time::sleep(SLEEP).await; + } + }); + + /* [3] */ + let start = Instant::now(); + ztimeout!(router_manager.del_listener(listen_endpoint)).unwrap(); + println!( + "Delete listener {}: {:#?}", + listen_endpoint, + start.elapsed() + ); + + ztimeout!(async { + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; + } + }); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; + + ztimeout!(router_manager.close()); + ztimeout!(client01_manager.close()); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn time_universal_transport(endpoint: &EndPoint) { + time_transport(endpoint, endpoint, false).await +} + +async fn time_lowlatency_transport(endpoint: &EndPoint) { + time_transport(endpoint, endpoint, true).await +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only() { + let _ = 
env_logger::try_init(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_ws")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_ws_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_ws")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_ws_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" + .parse() + .unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unix_only() { + let _ = env_logger::try_init(); + let f1 = 
"zenoh-test-unix-socket-9.sock"; + let _ = std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + time_universal_transport(&endpoint).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} + +#[cfg(feature = "transport_tls")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tls_only() { + use zenoh_link::tls::config::*; + + let _ = env_logger::try_init(); + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT 
+Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi 
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); + endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_quic")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_quic_only() { + use zenoh_link::quic::config::*; + + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. 
The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw 
+MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ 
+e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); + endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + time_universal_transport(&endpoint).await; +} + +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_vsock_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} From 2da0aeb0c59a5634b1975fad1200fb92256ec733 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 11:06:54 +0200 Subject: [PATCH 121/598] Declare message can be Push/Request/RequestContinuous/Response (#902) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all --- commons/zenoh-codec/src/network/declare.rs | 236 +++++++----------- commons/zenoh-codec/tests/codec.rs | 16 ++ commons/zenoh-protocol/src/network/declare.rs | 215 ++++++++-------- commons/zenoh-protocol/src/network/mod.rs | 6 +- zenoh/src/key_expr.rs | 4 +- zenoh/src/net/routing/dispatcher/face.rs | 3 +- zenoh/src/net/routing/dispatcher/resource.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 10 +- zenoh/src/net/routing/hat/client/queries.rs | 8 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 14 +- 
.../net/routing/hat/linkstate_peer/queries.rs | 14 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 10 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 8 +- zenoh/src/net/routing/hat/router/pubsub.rs | 22 +- zenoh/src/net/routing/hat/router/queries.rs | 22 +- zenoh/src/net/routing/mod.rs | 3 +- zenoh/src/net/runtime/adminspace.rs | 8 +- zenoh/src/net/tests/tables.rs | 12 +- zenoh/src/session.rs | 19 +- 19 files changed, 283 insertions(+), 351 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index d7a25ea0a9..173fbe5e4a 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -19,12 +19,16 @@ use zenoh_buffers::{ ZBuf, }; use zenoh_protocol::{ - common::{iext, imsg, ZExtZ64}, + common::{ + iext, + imsg::{self, HEADER_BITS}, + ZExtZ64, + }, core::{ExprId, ExprLen, WireExpr}, network::{ declare::{ self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, - Interest, + DeclareMode, Interest, }, id, Mapping, }, @@ -48,8 +52,7 @@ where DeclareBody::DeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::UndeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::DeclareInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::FinalInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::UndeclareInterest(r) => self.write(&mut *writer, r)?, + DeclareBody::DeclareFinal(r) => self.write(&mut *writer, r)?, } Ok(()) @@ -77,8 +80,7 @@ where D_TOKEN => DeclareBody::DeclareToken(codec.read(&mut *reader)?), U_TOKEN => DeclareBody::UndeclareToken(codec.read(&mut *reader)?), D_INTEREST => DeclareBody::DeclareInterest(codec.read(&mut *reader)?), - F_INTEREST => DeclareBody::FinalInterest(codec.read(&mut *reader)?), - U_INTEREST => DeclareBody::UndeclareInterest(codec.read(&mut *reader)?), + D_FINAL => DeclareBody::DeclareFinal(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -95,7 +97,7 @@ where fn write(self, 
writer: &mut W, x: &Declare) -> Self::Output { let Declare { - interest_id, + mode, ext_qos, ext_tstamp, ext_nodeid, @@ -104,9 +106,13 @@ where // Header let mut header = id::DECLARE; - if x.interest_id.is_some() { - header |= declare::flag::I; - } + header |= match mode { + DeclareMode::Push => 0b00, + DeclareMode::Response(_) => 0b01, + DeclareMode::Request(_) => 0b10, + DeclareMode::RequestContinuous(_) => 0b11, + } << HEADER_BITS; + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); @@ -116,8 +122,11 @@ where self.write(&mut *writer, header)?; // Body - if let Some(interest_id) = interest_id { - self.write(&mut *writer, interest_id)?; + if let DeclareMode::Request(rid) + | DeclareMode::RequestContinuous(rid) + | DeclareMode::Response(rid) = mode + { + self.write(&mut *writer, rid)?; } // Extensions @@ -166,10 +175,14 @@ where return Err(DidntRead); } - let mut interest_id = None; - if imsg::has_flag(self.header, declare::flag::I) { - interest_id = Some(self.codec.read(&mut *reader)?); - } + // Body + let mode = match (self.header >> HEADER_BITS) & 0b11 { + 0b00 => DeclareMode::Push, + 0b01 => DeclareMode::Response(self.codec.read(&mut *reader)?), + 0b10 => DeclareMode::Request(self.codec.read(&mut *reader)?), + 0b11 => DeclareMode::RequestContinuous(self.codec.read(&mut *reader)?), + _ => return Err(DidntRead), + }; // Extensions let mut ext_qos = declare::ext::QoSType::DEFAULT; @@ -206,7 +219,7 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - interest_id, + mode, ext_qos, ext_tstamp, ext_nodeid, @@ -215,6 +228,59 @@ where } } +// Final +impl WCodec<&common::DeclareFinal, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &common::DeclareFinal) -> Self::Output { + let common::DeclareFinal = x; + + // Header + let header = declare::id::D_FINAL; + 
self.write(&mut *writer, header)?; + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != declare::id::D_FINAL { + return Err(DidntRead); + } + + // Extensions + let has_ext = imsg::has_flag(self.header, token::flag::Z); + if has_ext { + extension::skip_all(reader, "Final")?; + } + + Ok(common::DeclareFinal) + } +} + // DeclareKeyExpr impl WCodec<&keyexpr::DeclareKeyExpr, &mut W> for Zenoh080 where @@ -907,7 +973,7 @@ where } = x; // Header - let header = declare::id::D_INTEREST | x.flags(); + let header = declare::id::D_INTEREST; self.write(&mut *writer, header)?; // Body @@ -976,140 +1042,6 @@ where } } -// FinalInterest -impl WCodec<&interest::FinalInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::FinalInterest) -> Self::Output { - let interest::FinalInterest { id } = x; - - // Header - let header = declare::id::F_INTEREST; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::F_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - - // Extensions - let has_ext = imsg::has_flag(self.header, 
token::flag::Z); - if has_ext { - extension::skip_all(reader, "FinalInterest")?; - } - - Ok(interest::FinalInterest { id }) - } -} - -// UndeclareInterest -impl WCodec<&interest::UndeclareInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::UndeclareInterest) -> Self::Output { - let interest::UndeclareInterest { id, ext_wire_expr } = x; - - // Header - let header = declare::id::U_INTEREST | interest::flag::Z; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - - // Extension - self.write(&mut *writer, (ext_wire_expr, false))?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::U_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - - // Extensions - let mut ext_wire_expr = common::ext::WireExprType::null(); - - let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - common::ext::WireExprExt::ID => { - let (we, ext): (common::ext::WireExprType, bool) = eodec.read(&mut *reader)?; - ext_wire_expr = we; - has_ext = ext; - } - _ => { - has_ext = extension::skip(reader, "UndeclareInterest", ext)?; - } - } - } - - Ok(interest::UndeclareInterest { id, ext_wire_expr }) - } -} - // WARNING: this is a temporary extension used for undeclarations impl WCodec<(&common::ext::WireExprType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/tests/codec.rs 
b/commons/zenoh-codec/tests/codec.rs index 2f0e870c4f..d28ba9a4d3 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -31,6 +31,22 @@ use zenoh_protocol::{ zenoh, zextunit, zextz64, zextzbuf, }; +#[test] +fn zbuf_test() { + let mut buffer = vec![0u8; 64]; + + let zbuf = ZBuf::empty(); + let mut writer = buffer.writer(); + + let codec = Zenoh080::new(); + codec.write(&mut writer, &zbuf).unwrap(); + println!("Buffer: {:?}", buffer); + + let mut reader = buffer.reader(); + let ret: ZBuf = codec.read(&mut reader).unwrap(); + assert_eq!(ret, zbuf); +} + const NUM_ITER: usize = 100; const MAX_PAYLOAD_SIZE: usize = 256; diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 10027259c2..996e7768ee 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -18,6 +18,8 @@ use crate::{ zextz64, zextzbuf, }; use alloc::borrow::Cow; +pub use common::*; +use core::sync::atomic::AtomicU32; pub use interest::*; pub use keyexpr::*; pub use queryable::*; @@ -31,24 +33,59 @@ pub mod flag { } /// Flags: -/// - I: Interest If I==1 then the declare is in a response to an Interest with future==false -/// - X: Reserved +/// - |: Mode The mode of the the declaration* +/// -/ /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|I| DECLARE | +/// |Z|Mod| DECLARE | /// +-+-+-+---------+ -/// ~interest_id:z32~ if I==1 +/// ~ rid:z32 ~ if Mode != Push /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ /// +---------------+ /// +/// *Mode of declaration: +/// - Mode 0b00: Push +/// - Mode 0b01: Response +/// - Mode 0b10: Request +/// - Mode 0b11: RequestContinuous + +/// The resolution of a RequestId +pub type DeclareRequestId = u32; +pub type AtomicDeclareRequestId = AtomicU32; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum 
DeclareMode { + Push, + Request(DeclareRequestId), + RequestContinuous(DeclareRequestId), + Response(DeclareRequestId), +} + +impl DeclareMode { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + + let mut rng = rand::thread_rng(); + + match rng.gen_range(0..4) { + 0 => DeclareMode::Push, + 1 => DeclareMode::Request(rng.gen()), + 2 => DeclareMode::RequestContinuous(rng.gen()), + 3 => DeclareMode::Response(rng.gen()), + _ => unreachable!(), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { - pub interest_id: Option, + pub mode: DeclareMode, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -85,8 +122,8 @@ pub mod id { pub const U_TOKEN: u8 = 0x07; pub const D_INTEREST: u8 = 0x08; - pub const F_INTEREST: u8 = 0x09; - pub const U_INTEREST: u8 = 0x0A; + + pub const D_FINAL: u8 = 0x1A; } #[derive(Debug, Clone, PartialEq, Eq)] @@ -100,8 +137,7 @@ pub enum DeclareBody { DeclareToken(DeclareToken), UndeclareToken(UndeclareToken), DeclareInterest(DeclareInterest), - FinalInterest(FinalInterest), - UndeclareInterest(UndeclareInterest), + DeclareFinal(DeclareFinal), } impl DeclareBody { @@ -111,7 +147,7 @@ impl DeclareBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..11) { + match rng.gen_range(0..10) { 0 => DeclareBody::DeclareKeyExpr(DeclareKeyExpr::rand()), 1 => DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr::rand()), 2 => DeclareBody::DeclareSubscriber(DeclareSubscriber::rand()), @@ -121,8 +157,7 @@ impl DeclareBody { 6 => DeclareBody::DeclareToken(DeclareToken::rand()), 7 => DeclareBody::UndeclareToken(UndeclareToken::rand()), 8 => DeclareBody::DeclareInterest(DeclareInterest::rand()), - 9 => DeclareBody::FinalInterest(FinalInterest::rand()), - 10 => DeclareBody::UndeclareInterest(UndeclareInterest::rand()), + 9 => DeclareBody::DeclareFinal(DeclareFinal::rand()), _ => unreachable!(), } } @@ -135,14 +170,14 @@ impl Declare { let mut rng = rand::thread_rng(); - let interest_id 
= rng.gen_bool(0.5).then_some(rng.gen::()); + let mode = DeclareMode::rand(); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); let body = DeclareBody::rand(); Self { - interest_id, + mode, ext_qos, ext_tstamp, ext_nodeid, @@ -154,6 +189,29 @@ impl Declare { pub mod common { use super::*; + /// ```text + /// Flags: + /// - X: Reserved + /// - X: Reserved + /// - Z: Extension If Z==1 then at least one extension is present + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |Z|x|x| D_FINAL | + /// +---------------+ + /// ~ [final_exts] ~ if Z==1 + /// +---------------+ + /// ``` + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct DeclareFinal; + + impl DeclareFinal { + #[cfg(feature = "test")] + pub fn rand() -> Self { + Self + } + } + pub mod ext { use super::*; @@ -545,7 +603,7 @@ pub mod queryable { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|0_2| U_QBL | + /// |Z|X|X| U_QBL | /// +---------------+ /// ~ qbls_id:z32 ~ /// +---------------+ @@ -668,44 +726,51 @@ pub mod interest { pub type InterestId = u32; pub mod flag { - pub const C: u8 = 1 << 5; // 0x20 Current if C==1 then the interest refers to the current declarations. - pub const F: u8 = 1 << 6; // 0x40 Future if F==1 then the interest refers to the future declarations. + // pub const X: u8 = 1 << 5; // 0x20 Reserved + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } /// # DeclareInterest message /// - /// The DECLARE INTEREST message is sent to request the transmission of existing and future - /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be sent to - /// request the transmisison of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to - /// mark the end of the transmission of exisiting matching declarations. 
+ /// The DECLARE INTEREST message is sent to request the transmission of current and/or future + /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be + /// sent to request the transmisison of all current subscriptions matching `a/*`. + /// + /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: + /// - Push: only future declarations + /// - Request: only current declarations + /// - RequestContinous: current and future declarations + /// - Response: invalid /// - /// E.g., the [`DeclareInterest`]/[`FinalInterest`]/[`UndeclareInterest`] message flow is the following: + /// E.g., the [`DeclareInterest`] message flow is the following: /// /// ```text /// A B /// | DECL INTEREST | - /// |------------------>| -- This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// |------------------>| -- Sent in Declare::RequestContinuous. + /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. /// | | /// | DECL SUBSCRIBER | - /// |<------------------| + /// |<------------------| -- Sent in Declare::Response /// | DECL SUBSCRIBER | - /// |<------------------| + /// |<------------------| -- Sent in Declare::Response /// | DECL SUBSCRIBER | - /// |<------------------| + /// |<------------------| -- Sent in Declare::Response /// | | - /// | FINAL INTEREST | - /// |<------------------| -- The FinalInterest signals that all known subscribers have been transmitted. + /// | FINAL | + /// |<------------------| -- Sent in Declare::Response /// | | /// | DECL SUBSCRIBER | - /// |<------------------| -- This is a new subscriber declaration. + /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. /// | UNDECL SUBSCRIBER | - /// |<------------------| -- This is a new subscriber undeclaration. + /// |<------------------| -- Sent in Declare::Push. This is a new subscriber undeclaration. /// | | /// | ... 
| /// | | - /// | UNDECL INTEREST | - /// |------------------>| -- This is an UndeclareInterest to stop receiving subscriber declarations/undeclarations. + /// | FINAL | + /// |------------------>| -- Sent in Declare::RequestContinuous. + /// | | This stops the transmission of subscriber declarations/undeclarations. /// | | /// ``` /// @@ -713,15 +778,13 @@ pub mod interest { /// /// ```text /// Flags: - /// - C: Current if C==1 then the interest refers to the current declarations. - /// - F: Future if F==1 then the interest refers to the future declarations. Note that if F==0 then: - /// - Declarations SHOULD NOT be sent after the FinalInterest; - /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. + /// - X: Reserved + /// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|F|C| D_INT | + /// |Z|F|X| D_INT | /// +---------------+ /// ~ intst_id:z32 ~ /// +---------------+ @@ -752,17 +815,6 @@ pub mod interest { } impl DeclareInterest { - pub fn flags(&self) -> u8 { - let mut interest = self.interest; - if self.interest.current() { - interest += Interest::CURRENT; - } - if self.interest.future() { - interest += Interest::FUTURE; - } - interest.flags - } - pub fn options(&self) -> u8 { let mut interest = self.interest; if let Some(we) = self.wire_expr.as_ref() { @@ -801,9 +853,6 @@ pub mod interest { } impl Interest { - // Header - pub const CURRENT: Interest = Interest::flags(interest::flag::C); - pub const FUTURE: Interest = Interest::flags(interest::flag::F); // Flags pub const KEYEXPRS: Interest = Interest::options(1); pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); @@ -820,10 +869,6 @@ pub mod interest { | Interest::TOKENS.options, ); - const fn flags(flags: u8) -> Self { - Self { flags, options: 0 } - } - const fn options(options: u8) -> Self { Self { flags: 0, options } } @@ -835,14 +880,6 @@ pub mod interest { } } - pub const fn current(&self) -> 
bool { - imsg::has_flag(self.flags, Self::CURRENT.flags) - } - - pub const fn future(&self) -> bool { - imsg::has_flag(self.flags, Self::FUTURE.flags) - } - pub const fn keyexprs(&self) -> bool { imsg::has_flag(self.options, Self::KEYEXPRS.options) } @@ -881,12 +918,6 @@ pub mod interest { let mut rng = rand::thread_rng(); let mut s = Self::empty(); - if rng.gen_bool(0.5) { - s += Interest::CURRENT; - } - if rng.gen_bool(0.5) { - s += Interest::FUTURE; - } if rng.gen_bool(0.5) { s += Interest::KEYEXPRS; } @@ -905,9 +936,7 @@ pub mod interest { impl PartialEq for Interest { fn eq(&self, other: &Self) -> bool { - self.current() == other.current() - && self.future() == other.future() - && self.keyexprs() == other.keyexprs() + self.keyexprs() == other.keyexprs() && self.subscribers() == other.subscribers() && self.queryables() == other.queryables() && self.tokens() == other.tokens() @@ -918,16 +947,6 @@ pub mod interest { impl Debug for Interest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Interest {{ ")?; - if self.current() { - write!(f, "C:Y, ")?; - } else { - write!(f, "C:N, ")?; - } - if self.future() { - write!(f, "F:Y, ")?; - } else { - write!(f, "F:N, ")?; - } if self.keyexprs() { write!(f, "K:Y, ")?; } else { @@ -1003,38 +1022,6 @@ pub mod interest { } } - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| F_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct FinalInterest { - pub id: InterestId, - } - - impl FinalInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - - Self { id } - } - } - /// ```text /// Flags: /// - X: Reserved diff --git 
a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 0e198ddf0f..cbf9894aef 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -20,9 +20,9 @@ pub mod response; use core::fmt; pub use declare::{ - Declare, DeclareBody, DeclareInterest, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, - DeclareToken, UndeclareInterest, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber, - UndeclareToken, + Declare, DeclareBody, DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, + DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareInterest, UndeclareKeyExpr, + UndeclareQueryable, UndeclareSubscriber, UndeclareToken, }; pub use oam::Oam; pub use push::Push; diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index aaa1d13724..17aa0425b6 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -53,7 +53,7 @@ pub use zenoh_keyexpr::*; pub use zenoh_macros::{kedefine, keformat, kewrite}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, - network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, + network::{declare, DeclareBody, DeclareMode, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -664,7 +664,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index cb565053c9..3531dd2d88 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -211,8 +211,7 @@ impl Primitives for Face { zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), 
zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::FinalInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::UndeclareInterest(_m) => todo!(), + zenoh_protocol::network::DeclareBody::DeclareFinal(_m) => todo!(), } drop(ctrl_lock); } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 194b97fca8..941b37f916 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -27,7 +27,7 @@ use zenoh_protocol::{ network::{ declare::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, - DeclareBody, DeclareKeyExpr, + DeclareBody, DeclareKeyExpr, DeclareMode, }, Mapping, }, @@ -452,7 +452,7 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index e85bb77bf9..6c689d3336 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareMode, DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ 
-137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 5c0bc5349b..28e1d75460 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { 
face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -418,7 +418,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -460,7 +460,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 150c12a632..356793e3a3 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -126,7 +126,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -170,7 +170,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, 
ext_nodeid: ext::NodeIdType::DEFAULT, @@ -339,7 +339,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -365,7 +365,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index b495248788..5ac0b22846 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareMode, DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - 
interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 72c32b9217..c2d62c7658 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -412,7 +412,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: 
ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -564,7 +564,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -606,7 +606,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -635,7 +635,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -774,7 +774,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -800,7 +800,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 99e787beb5..e647cf2dc7 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ 
core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -194,7 +194,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -248,7 +248,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -473,7 +473,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -499,7 +499,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -775,7 +775,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -874,7 +874,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: 
None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -900,7 +900,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 0ddf12b82f..77f51c16b3 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -117,8 +117,7 @@ impl RoutingContext { DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), - DeclareBody::FinalInterest(_) => None, - DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), + DeclareBody::DeclareFinal(_) => None, }, NetworkBody::OAM(_) => None, } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index d460ee3f1c..a5739d830c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -39,8 +39,8 @@ use zenoh_protocol::{ }, network::{ declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, - ResponseFinal, + ext, Declare, DeclareBody, DeclareMode, DeclareQueryable, DeclareSubscriber, Push, Request, + Response, ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; @@ -276,7 +276,7 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, @@ -289,7 +289,7 @@ impl AdminSpace { }); primitives.send_declare(Declare { - interest_id: None, + mode: 
DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 4067f2ad8f..55ff9f0a4d 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,7 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; +use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr, DeclareMode}; use zenoh_protocol::zenoh::{PushBody, Put}; #[test] @@ -579,7 +579,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -607,7 +607,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -629,7 +629,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -657,7 +657,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -679,7 +679,7 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index addb757807..9bc6c9c331 100644 --- a/zenoh/src/session.rs +++ 
b/zenoh/src/session.rs @@ -71,7 +71,7 @@ use zenoh_protocol::{ network::{ declare::{ self, common::ext::WireExprType, queryable::ext::QueryableInfoType, - subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, + subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, ext, @@ -872,7 +872,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1085,7 +1085,7 @@ impl Session { // }; primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1142,7 +1142,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1194,7 +1194,7 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1216,7 +1216,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1252,7 +1252,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, 
ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1277,7 +1277,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -2047,8 +2047,7 @@ impl Primitives for Session { DeclareBody::DeclareToken(_) => todo!(), DeclareBody::UndeclareToken(_) => todo!(), DeclareBody::DeclareInterest(_) => todo!(), - DeclareBody::FinalInterest(_) => todo!(), - DeclareBody::UndeclareInterest(_) => todo!(), + DeclareBody::DeclareFinal(_) => todo!(), } } From 8f8eb2589a57c1074622c125f5111c4afde9a1e7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 11:19:56 +0200 Subject: [PATCH 122/598] typedefs for complex builder types (#890) --- zenoh/src/publication.rs | 21 +++++++++++++++------ zenoh/src/queryable.rs | 8 ++++++-- zenoh/src/session.rs | 4 ++-- 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 64fa5b49c6..c176ad32e0 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -46,7 +46,8 @@ pub struct PublicationBuilderPut { #[derive(Debug, Clone)] pub struct PublicationBuilderDelete; -/// A builder for initializing a [`put`](crate::Session::put) and [`delete`](crate::Session::delete) operations +/// A builder for initializing [`Session::put`](crate::Session::put), [`Session::delete`](crate::Session::delete), +/// [`Publisher::put`](crate::Publisher::put), and [`Publisher::delete`](crate::Publisher::delete) operations. 
/// /// # Examples /// ``` @@ -78,6 +79,17 @@ pub struct PublicationBuilder { pub(crate) attachment: Option, } +pub type SessionPutBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderPut>; + +pub type SessionDeleteBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderDelete>; + +pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; + +pub type PublisherDeleteBuilder<'a> = + PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; + impl QoSBuilderTrait for PublicationBuilder, T> { #[inline] fn congestion_control(self, congestion_control: CongestionControl) -> Self { @@ -405,10 +417,7 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put( - &self, - payload: IntoPayload, - ) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> + pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> where IntoPayload: Into, { @@ -439,7 +448,7 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # } /// ``` - pub fn delete(&self) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + pub fn delete(&self) -> PublisherDeleteBuilder<'_> { PublicationBuilder { publisher: self, kind: PublicationBuilderDelete, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0696fcbe33..c2fd67fcf4 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -129,7 +129,7 @@ impl Query { &self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> ReplyBuilder<'_, 'b, ReplyBuilderPut> + ) -> ReplyPutBuilder<'_, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -171,7 +171,7 @@ impl Query { pub fn reply_del<'b, TryIntoKeyExpr>( &self, key_expr: TryIntoKeyExpr, - ) -> ReplyBuilder<'_, 'b, ReplyBuilderDelete> + ) -> ReplyDeleteBuilder<'_, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -274,6 +274,10 @@ pub struct ReplyBuilder<'a, 'b, T> { attachment: Option, } +pub type ReplyPutBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderPut>; + +pub 
type ReplyDeleteBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderDelete>; + impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { fn timestamp>>(self, timestamp: U) -> Self { Self { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index c44cb4f817..d9427f270c 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -705,7 +705,7 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> PublicationBuilder, PublicationBuilderPut> + ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -745,7 +745,7 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> PublicationBuilder, PublicationBuilderDelete> + ) -> SessionDeleteBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, From aeb15c311c421980f939de63b3f1ee1ffc67ecd7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:16:31 +0200 Subject: [PATCH 123/598] runtime to api --- zenoh/src/lib.rs | 7 +++++-- zenoh/src/net/routing/hat/client/mod.rs | 2 +- zenoh/src/net/routing/hat/linkstate_peer/mod.rs | 2 +- zenoh/src/net/routing/hat/mod.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 2 +- zenoh/src/net/routing/hat/router/mod.rs | 2 +- zenoh/src/plugins/sealed.rs | 2 +- 7 files changed, 11 insertions(+), 8 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3a5c358ae2..3e4a0ddda9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -227,9 +227,12 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } +pub mod runtime { + pub use crate::net::runtime::{AdminSpace, Runtime}; +} + mod api; -pub(crate) mod net; -pub use net::runtime; +mod net; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; #[cfg(feature = "unstable")] diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 8b7031152a..6ca0af1e17 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -22,7 +22,7 @@ use crate::{ dispatcher::face::Face, router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, - runtime::Runtime, + net::runtime::Runtime, }; use self::{ diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 1b8ea8f7d4..14f0e9f57e 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -30,6 +30,7 @@ use super::{ HatBaseTrait, HatTrait, }; use crate::{ + net::runtime::Runtime, net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, @@ -39,7 +40,6 @@ use crate::{ router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, - runtime::Runtime, }; use std::{ any::Any, diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 70e94ac176..3d1ae0f632 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -24,7 +24,7 @@ use super::{ }, router::RoutesIndexes, }; -use crate::runtime::Runtime; +use crate::net::runtime::Runtime; use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 1d87c2eb23..929247f3a9 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -18,6 +18,7 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use crate::{ + net::runtime::Runtime, net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, @@ -26,7 +27,6 @@ use crate::{ router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, - runtime::Runtime, }; use self::{ diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 52f067037e..cf59a65ea8 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -34,6 +34,7 @@ use super::{ HatBaseTrait, HatTrait, }; use crate::{ + net::runtime::Runtime, net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, @@ -43,7 +44,6 @@ use crate::{ router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, - runtime::Runtime, }; use std::{ any::Any, diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/plugins/sealed.rs index cc11fc213d..3684324cf2 100644 --- a/zenoh/src/plugins/sealed.rs +++ b/zenoh/src/plugins/sealed.rs @@ -14,7 +14,7 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). 
-use crate::{prelude::Selector, runtime::Runtime}; +use crate::{net::runtime::Runtime, prelude::Selector}; use zenoh_core::zconfigurable; use zenoh_plugin_trait::{ From 5e4b7d8ac5320ccea2fda66b3b282e42f204b370 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:35:03 +0200 Subject: [PATCH 124/598] config in api --- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 1 + zenoh/src/api/session.rs | 4 ++-- zenoh/src/lib.rs | 9 +++++++-- zenoh/src/net/runtime/adminspace.rs | 4 ++-- zenoh/src/net/runtime/mod.rs | 2 +- zenoh/src/prelude.rs | 2 +- zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/interceptors.rs | 1 + 9 files changed, 17 insertions(+), 9 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 6b64bbd742..d3c6207496 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,6 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; +use zenoh::config::ValidatedMap; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 864ec5b79e..1b40a83cd5 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,6 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; +use zenoh::config::ValidatedMap; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 83a57ce260..5793794815 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -41,8 +41,6 @@ use crate::api::selector::TIME_RANGE_KEY; use 
crate::api::subscriber::SubscriberBuilder; use crate::api::subscriber::SubscriberState; use crate::api::value::Value; -use crate::config::Config; -use crate::config::Notifier; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; @@ -71,6 +69,8 @@ use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; +use zenoh_config::Config; +use zenoh_config::Notifier; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3e4a0ddda9..77cc120c5c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -231,10 +231,15 @@ pub mod runtime { pub use crate::net::runtime::{AdminSpace, Runtime}; } +pub mod config { + pub use zenoh_config::{ + client, default, peer, Config, ModeDependentValue, PermissionsConf, PluginLoad, + ValidatedMap, + }; +} + mod api; mod net; -#[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] -pub use zenoh_config as config; #[cfg(feature = "unstable")] pub mod plugins; pub mod prelude; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 708d2bb349..97e77246bb 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -70,7 +70,7 @@ pub struct AdminSpace { #[derive(Debug, Clone)] enum PluginDiff { Delete(String), - Start(crate::config::PluginLoad), + Start(zenoh_config::PluginLoad), } impl ConfigValidator for AdminSpace { @@ -93,7 +93,7 @@ impl ConfigValidator for AdminSpace { impl AdminSpace { fn start_plugin( plugin_mgr: &mut plugins::PluginsManager, - config: &crate::config::PluginLoad, + config: &zenoh_config::PluginLoad, start_args: &Runtime, ) -> ZResult<()> { let name = &config.name; diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 1d81811c76..98f781720b 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -23,7 +23,6 @@ pub mod orchestrator; use super::primitives::DeMux; use super::routing; use super::routing::router::Router; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::GIT_VERSION; pub use adminspace::AdminSpace; use futures::stream::StreamExt; @@ -34,6 +33,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; +use zenoh_config::{unwrap_or_default, Config, ModeDependent, Notifier}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 317cad3a68..2443102d14 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -36,7 +36,7 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - pub use crate::config::{self, Config, ValidatedMap}; + pub 
use crate::config::{self, Config}; pub use crate::handlers::IntoHandler; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index fcb071b489..0a3ed01ce7 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,4 +1,4 @@ -use config::ConnectionRetryConf; +use zenoh_config::{ConnectionRetryConf, ValidatedMap}; use zenoh::prelude::sync::*; diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1ee93e4949..a6eff19ec9 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; +use zenoh_config::ValidatedMap; use zenoh_core::zlock; struct IntervalCounter { From bf7e0f10beb9d2664f69c147743c3507fa9e2845 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:51:03 +0200 Subject: [PATCH 125/598] plugins into api --- zenoh/src/{plugins/sealed.rs => api/plugins.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{plugins/sealed.rs => api/plugins.rs} (100%) diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/api/plugins.rs similarity index 100% rename from zenoh/src/plugins/sealed.rs rename to zenoh/src/api/plugins.rs From b4552b2c146d0584469ba99304d1dec77e4e235f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:59:51 +0200 Subject: [PATCH 126/598] plugins, buffers to api --- .../src/replica/storage.rs | 2 +- zenoh/src/api.rs | 2 ++ zenoh/src/lib.rs | 25 +++++++++++++------ zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/plugins/mod.rs | 23 ----------------- 5 files changed, 21 insertions(+), 33 deletions(-) delete mode 100644 zenoh/src/plugins/mod.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 1abe311b65..646aebc837 100644 --- 
a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,7 +21,7 @@ use futures::select; use std::collections::{HashMap, HashSet}; use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::buffer::SplitBuffer; +use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 1af7da37c5..44496822ea 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -20,6 +20,8 @@ pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod liveliness; pub(crate) mod payload; +#[cfg(feature = "unstable")] +pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; pub(crate) mod queryable; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 77cc120c5c..60f5e61965 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -79,6 +79,9 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; +mod api; +mod net; + pub(crate) type Id = u32; use git_version::git_version; @@ -117,6 +120,13 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +/// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate +/// reading and writing data. 
+pub mod buffers { + pub use zenoh_buffers::buffer::SplitBuffer; + pub use zenoh_buffers::{ZBuf, ZSlice}; +} + pub mod key_expr { pub use crate::api::key_expr::kedefine; pub use crate::api::key_expr::keformat; @@ -238,14 +248,13 @@ pub mod config { }; } -mod api; -mod net; -#[cfg(feature = "unstable")] -pub mod plugins; +pub mod plugins { + pub use crate::api::plugins::PluginsManager; + pub use crate::api::plugins::Response; + pub use crate::api::plugins::RunningPlugin; + pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; +} + pub mod prelude; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; - -/// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate -/// reading and writing data. -pub use zenoh_buffers as buffers; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 97e77246bb..fa5c5ef1bf 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -14,13 +14,13 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; use crate::api::key_expr::KeyExpr; +use crate::api::plugins; use crate::api::queryable::Query; use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; -use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use log::{error, trace}; use serde_json::json; diff --git a/zenoh/src/plugins/mod.rs b/zenoh/src/plugins/mod.rs deleted file mode 100644 index d72139cc29..0000000000 --- a/zenoh/src/plugins/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is 
available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! ⚠️ WARNING ⚠️ -//! -//! This module is intended for Zenoh's internal use. -//! -//! [Click here for Zenoh's documentation](../../zenoh/index.html) -pub(crate) mod sealed; - -#[zenoh_macros::unstable] -pub use sealed::*; From be9c672c404602a506ed0557cfdd4bfb875b7a65 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 13:14:03 +0200 Subject: [PATCH 127/598] shm to api --- zenoh/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 60f5e61965..3bb919c933 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -255,6 +255,9 @@ pub mod plugins { pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; } -pub mod prelude; #[cfg(feature = "shared-memory")] -pub use zenoh_shm as shm; +pub mod shm { + pub use zenoh_shm::SharedMemoryManager; +} + +pub mod prelude; From 71a9423738076d07ff6a83a043cee250cb06350d Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Apr 2024 14:19:38 +0200 Subject: [PATCH 128/598] Declare message can be Push/Request/RequestContinuous/Response 2 (#906) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs --------- Co-authored-by: Luca Cominardi --- commons/zenoh-codec/src/network/declare.rs | 6 +- commons/zenoh-protocol/src/network/declare.rs | 58 +++++++++++-------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 173fbe5e4a..6e9dad12ce 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ 
b/commons/zenoh-codec/src/network/declare.rs @@ -967,7 +967,6 @@ where fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { let interest::DeclareInterest { - id, interest: _, wire_expr, } = x; @@ -977,7 +976,6 @@ where self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, id)?; self.write(&mut *writer, x.options())?; if let Some(we) = wire_expr.as_ref() { self.write(&mut *writer, we)?; @@ -1012,9 +1010,8 @@ where } // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; let options: u8 = self.codec.read(&mut *reader)?; - let interest = Interest::from((imsg::flags(self.header), options)); + let interest = Interest::from(options); let mut wire_expr = None; if interest.restricted() { @@ -1035,7 +1032,6 @@ where } Ok(interest::DeclareInterest { - id, interest, wire_expr, }) diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 996e7768ee..6cd2b2200f 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -733,23 +733,23 @@ pub mod interest { /// # DeclareInterest message /// - /// The DECLARE INTEREST message is sent to request the transmission of current and/or future + /// The DECLARE INTEREST message is sent to request the transmission of current and optionally future /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be /// sent to request the transmisison of all current subscriptions matching `a/*`. 
/// /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: - /// - Push: only future declarations + /// - Push: invalid /// - Request: only current declarations /// - RequestContinous: current and future declarations /// - Response: invalid /// - /// E.g., the [`DeclareInterest`] message flow is the following: + /// E.g., the [`DeclareInterest`] message flow is the following for a Request: /// /// ```text /// A B /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// |------------------>| -- Sent in Declare::Request. + /// | | This is a DeclareInterest e.g. for subscriber declarations. /// | | /// | DECL SUBSCRIBER | /// |<------------------| -- Sent in Declare::Response @@ -760,6 +760,26 @@ pub mod interest { /// | | /// | FINAL | /// |<------------------| -- Sent in Declare::Response + /// ``` + /// + /// + /// And the [`DeclareInterest`] message flow is the following for a RequestContinuous: + /// + /// ```text + /// A B + /// | DECL INTEREST | + /// |------------------>| -- Sent in Declare::RequestContinuous. + /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// | | + /// | DECL SUBSCRIBER | + /// |<------------------| -- Sent in Declare::Push + /// | DECL SUBSCRIBER | + /// |<------------------| -- Sent in Declare::Push + /// | DECL SUBSCRIBER | + /// |<------------------| -- Sent in Declare::Push + /// | | + /// | FINAL | + /// |<------------------| -- Sent in Declare::Response /// | | /// | DECL SUBSCRIBER | /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. 
@@ -784,9 +804,7 @@ pub mod interest { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|F|X| D_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ + /// |Z|X|X| D_INT | /// +---------------+ /// |A|M|N|R|T|Q|S|K| (*) /// +---------------+ @@ -809,7 +827,6 @@ pub mod interest { /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareInterest { - pub id: InterestId, pub interest: Interest, pub wire_expr: Option>, } @@ -834,12 +851,10 @@ pub mod interest { use rand::Rng; let mut rng = rand::thread_rng(); - let id: InterestId = rng.gen(); let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); let interest = Interest::rand(); Self { - id, wire_expr, interest, } @@ -848,7 +863,6 @@ pub mod interest { #[derive(Clone, Copy)] pub struct Interest { - flags: u8, options: u8, } @@ -870,14 +884,11 @@ pub mod interest { ); const fn options(options: u8) -> Self { - Self { flags: 0, options } + Self { options } } pub const fn empty() -> Self { - Self { - flags: 0, - options: 0, - } + Self { options: 0 } } pub const fn keyexprs(&self) -> bool { @@ -982,17 +993,17 @@ pub mod interest { impl Add for Interest { type Output = Self; + #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest fn add(self, rhs: Self) -> Self::Output { Self { - flags: self.flags | rhs.flags, options: self.options | rhs.options, } } } impl AddAssign for Interest { + #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest fn add_assign(&mut self, rhs: Self) { - self.flags |= rhs.flags; self.options |= rhs.options; } } @@ -1002,7 +1013,6 @@ pub mod interest { fn sub(self, rhs: Self) -> Self::Output { Self { - flags: self.flags & !rhs.flags, options: self.options & !rhs.options, } } @@ -1010,15 +1020,13 @@ pub mod interest { impl SubAssign for Interest { fn sub_assign(&mut self, rhs: Self) { - self.flags &= !rhs.flags; self.options &= !rhs.options; } } - impl From<(u8, u8)> for Interest { - fn from(value: (u8, u8)) -> 
Self { - let (flags, options) = value; - Self { flags, options } + impl From for Interest { + fn from(options: u8) -> Self { + Self { options } } } From eb1a80ac9ddc7c15942238e477993825f559cd17 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 14:47:11 +0200 Subject: [PATCH 129/598] Fix use and unstable visibility --- zenoh/src/prelude.rs | 5 ++++- zenoh/src/queryable.rs | 3 +-- zenoh/src/sample/builder.rs | 8 +++----- zenoh/src/session.rs | 1 + zenoh/tests/qos.rs | 1 - zenoh/tests/routing.rs | 1 - 6 files changed, 9 insertions(+), 10 deletions(-) diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 850148f506..2e95e8d908 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -62,8 +62,11 @@ pub(crate) mod common { pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; pub use crate::sample::builder::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; + + #[zenoh_macros::unstable] + pub use crate::sample::builder::SampleBuilderTrait; } /// Prelude to import when using Zenoh's sync API. 
diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index c2fd67fcf4..794ff3a504 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,13 +18,12 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::SampleBuilder; use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; +use crate::{query::ReplyKeyExpr, sample::Attachment, sample::builder::SampleBuilder}; use std::fmt; use std::future::Ready; use std::ops::Deref; diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 5fab36617d..fca55edd09 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -15,11 +15,8 @@ use std::marker::PhantomData; #[cfg(feature = "unstable")] -use crate::sample::Attachment; -use crate::sample::QoS; -use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; +use crate::sample::{Attachment, SourceInfo}; +use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; use crate::Payload; @@ -47,6 +44,7 @@ pub trait TimestampBuilderTrait { fn timestamp>>(self, timestamp: T) -> Self; } +#[zenoh_macros::unstable] pub trait SampleBuilderTrait { /// Attach source information #[zenoh_macros::unstable] diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index d9427f270c..67bec5f488 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -41,6 +41,7 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Selector; +#[cfg(feature = "unstable")] use crate::SourceInfo; use crate::Value; use log::{error, trace, warn}; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 5fd3edd985..1885c316ea 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,7 +13,6 @@ // use std::time::Duration; use 
zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{publication::Priority, SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 1031630a68..56bacd7fdd 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -18,7 +18,6 @@ use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; From a43e4518875f1c4be314943e78fdc483ae6f9844 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 14:50:16 +0200 Subject: [PATCH 130/598] Add payload and encoding accessors for Query --- zenoh/src/queryable.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 794ff3a504..6fbb4e9090 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -23,7 +23,7 @@ use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment, sample::builder::SampleBuilder}; +use crate::{query::ReplyKeyExpr, sample::builder::SampleBuilder, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -97,6 +97,18 @@ impl Query { self.inner.value.as_ref() } + /// This Query's payload. + #[inline(always)] + pub fn payload(&self) -> Option<&Payload> { + self.inner.value.as_ref().map(|v| &v.payload) + } + + /// This Query's encoding. 
+ #[inline(always)] + pub fn encoding(&self) -> Option<&Encoding> { + self.inner.value.as_ref().map(|v| &v.encoding) + } + #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() From 1ad8c84c8b3f2f0f93f5dadb3a190af198e4e289 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 14:52:08 +0200 Subject: [PATCH 131/598] cargo fmt --all --- zenoh/src/prelude.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2e95e8d908..e2327c0dcc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -61,9 +61,7 @@ pub(crate) mod common { pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::sample::builder::{ - QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, - }; + pub use crate::sample::builder::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; #[zenoh_macros::unstable] pub use crate::sample::builder::SampleBuilderTrait; From 5ee2bdb26780926b381d9bb93f0d52a262b06488 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Apr 2024 15:16:12 +0200 Subject: [PATCH 132/598] Declare message can be Push/Request/RequestContinuous/Response 3 (#908) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest --------- Co-authored-by: Luca Cominardi --- commons/zenoh-protocol/src/network/declare.rs | 34 ------------------- commons/zenoh-protocol/src/network/mod.rs | 4 +-- 2 files changed, 2 insertions(+), 36 deletions(-) diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 6cd2b2200f..31e8adcc6e 100644 --- 
a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -1029,38 +1029,4 @@ pub mod interest { Self { options } } } - - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| U_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct UndeclareInterest { - pub id: InterestId, - pub ext_wire_expr: common::ext::WireExprType, - } - - impl UndeclareInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - let ext_wire_expr = common::ext::WireExprType::rand(); - - Self { id, ext_wire_expr } - } - } } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index cbf9894aef..e60388f425 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -21,8 +21,8 @@ use core::fmt; pub use declare::{ Declare, DeclareBody, DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, - DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareInterest, UndeclareKeyExpr, - UndeclareQueryable, UndeclareSubscriber, UndeclareToken, + DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareKeyExpr, UndeclareQueryable, + UndeclareSubscriber, UndeclareToken, }; pub use oam::Oam; pub use push::Push; From 518816c47dbd3d885d60986b51fa185cfd9cd6a7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 17:33:27 +0200 Subject: [PATCH 133/598] publication builders in builders api --- examples/examples/z_formats.rs | 2 +- examples/examples/z_scout.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/mod.rs | 2 +- zenoh-ext/src/group.rs | 2 +- 
zenoh/src/api/builders.rs | 1 + zenoh/src/api/builders/publication.rs | 437 ++++++++++++ zenoh/src/api/key_expr.rs | 2 +- zenoh/src/api/publication.rs | 622 +++--------------- zenoh/src/api/session.rs | 6 +- zenoh/src/lib.rs | 6 +- zenoh/src/prelude.rs | 4 +- 12 files changed, 560 insertions(+), 528 deletions(-) create mode 100644 zenoh/src/api/builders/publication.rs diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 69313f0e56..eab5aa035a 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // -use zenoh::key_expr::keyexpr; use zenoh::key_expr::kedefine; use zenoh::key_expr::keformat; +use zenoh::key_expr::keyexpr; kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index 11ed3a6fd8..a46b7c49fe 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -13,8 +13,8 @@ // use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::scouting::WhatAmI; use zenoh::scouting::scout; +use zenoh::scouting::WhatAmI; #[tokio::main] async fn main() { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 8654927f9f..5908778867 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; use zenoh::session::Session; +use zenoh::time::Timestamp; pub struct AlignQueryable { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 9a4fd35a11..77741f43fc 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -28,8 +28,8 @@ use std::time::{Duration, SystemTime}; use urlencoding::encode; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; use zenoh::session::Session; +use zenoh::time::Timestamp; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; pub mod align_queryable; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index f74d9d547a..aa03571f6f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,9 +29,9 @@ use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; +use zenoh::session::Session; use zenoh::Error as ZError; use zenoh::Result as ZResult; -use zenoh::session::Session; use zenoh_result::bail; use zenoh_sync::Condition; diff --git a/zenoh/src/api/builders.rs b/zenoh/src/api/builders.rs index 09d12657a5..94dbda2dd0 100644 --- a/zenoh/src/api/builders.rs +++ b/zenoh/src/api/builders.rs @@ -12,4 +12,5 @@ // ZettaScale Zenoh Team, // +pub(crate) mod publication; pub(crate) mod sample; diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs new file mode 100644 index 0000000000..50a8c6ab42 --- /dev/null +++ b/zenoh/src/api/builders/publication.rs @@ -0,0 +1,437 @@ +use std::future::Ready; + +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::api::builders::sample::SampleBuilderTrait; +use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; +use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; +use crate::api::sample::Locality; +use crate::api::sample::SampleKind; +#[cfg(feature = "unstable")] +use crate::api::sample::SourceInfo; +use crate::api::session::SessionRef; +use crate::api::value::Value; +use crate::api::{ + encoding::Encoding, payload::Payload, publication::Publisher, sample::Attachment, +}; +use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use zenoh_protocol::core::CongestionControl; +use zenoh_protocol::network::Mapping; + +pub type SessionPutBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderPut>; + +pub type SessionDeleteBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderDelete>; + +pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; + +pub type PublisherDeleteBuilder<'a> = + PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; + +#[derive(Debug, Clone)] +pub struct PublicationBuilderPut { + pub(crate) payload: Payload, + pub(crate) encoding: Encoding, +} +#[derive(Debug, Clone)] +pub struct PublicationBuilderDelete; + +/// A builder for initializing [`Session::put`](crate::session::Session::put), [`Session::delete`](crate::session::Session::delete), +/// [`Publisher::put`](crate::publication::Publisher::put), and [`Publisher::delete`](crate::publication::Publisher::delete) operations. 
+/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::publication::CongestionControl; +/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// session +/// .put("key/expression", "payload") +/// .encoding(Encoding::TEXT_PLAIN) +/// .congestion_control(CongestionControl::Block) +/// .res() +/// .await +/// .unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug, Clone)] +pub struct PublicationBuilder { + pub(crate) publisher: P, + pub(crate) kind: T, + pub(crate) timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl QoSBuilderTrait for PublicationBuilder, T> { + #[inline] + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + publisher: self.publisher.congestion_control(congestion_control), + ..self + } + } + #[inline] + fn priority(self, priority: Priority) -> Self { + Self { + publisher: self.publisher.priority(priority), + ..self + } + } + #[inline] + fn express(self, is_express: bool) -> Self { + Self { + publisher: self.publisher.express(is_express), + ..self + } + } +} + +impl PublicationBuilder, T> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.publisher = self.publisher.allowed_destination(destination); + self + } +} + +impl

ValueBuilderTrait for PublicationBuilder { + fn encoding>(self, encoding: T) -> Self { + Self { + kind: PublicationBuilderPut { + encoding: encoding.into(), + ..self.kind + }, + ..self + } + } + + fn payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + kind: PublicationBuilderPut { + payload: payload.into(), + ..self.kind + }, + ..self + } + } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + kind: PublicationBuilderPut { payload, encoding }, + ..self + } + } +} + +impl SampleBuilderTrait for PublicationBuilder { + #[cfg(feature = "unstable")] + fn source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + #[cfg(feature = "unstable")] + fn attachment>>(self, attachment: TA) -> Self { + Self { + attachment: attachment.into(), + ..self + } + } +} + +impl TimestampBuilderTrait for PublicationBuilder { + fn timestamp>>(self, timestamp: TS) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } + } +} + +impl Resolvable for PublicationBuilder { + type To = ZResult<()>; +} + +impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { + #[inline] + fn res_sync(self) -> ::To { + let publisher = self.publisher.create_one_shot_publisher()?; + publisher.resolve_put( + self.kind.payload, + SampleKind::Put, + self.kind.encoding, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { + #[inline] + fn res_sync(self) -> ::To { + let publisher = self.publisher.create_one_shot_publisher()?; + publisher.resolve_put( + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { + type Future = Ready; + + 
fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +/// A builder for initializing a [`Publisher`]. +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::publication::CongestionControl; +/// use zenoh::sample::builder::QoSBuilderTrait; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let publisher = session +/// .declare_publisher("key/expression") +/// .congestion_control(CongestionControl::Block) +/// .res() +/// .await +/// .unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct PublisherBuilder<'a, 'b: 'a> { + pub(crate) session: SessionRef<'a>, + pub(crate) key_expr: ZResult>, + pub(crate) congestion_control: CongestionControl, + pub(crate) priority: Priority, + pub(crate) is_express: bool, + pub(crate) destination: Locality, +} + +impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { + fn clone(&self) -> Self { + Self { + session: self.session.clone(), + key_expr: match &self.key_expr { + Ok(k) => Ok(k.clone()), + Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), + }, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + } + } +} + +impl QoSBuilderTrait for PublisherBuilder<'_, '_> { + /// Change the `congestion_control` to apply when routing the data. + #[inline] + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + congestion_control, + ..self + } + } + + /// Change the priority of the written data. 
+ #[inline] + fn priority(self, priority: Priority) -> Self { + Self { priority, ..self } + } + + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. + #[inline] + fn express(self, is_express: bool) -> Self { + Self { is_express, ..self } + } +} + +impl<'a, 'b> PublisherBuilder<'a, 'b> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.destination = destination; + self + } + + // internal function for perfroming the publication + fn create_one_shot_publisher(self) -> ZResult> { + Ok(Publisher { + session: self.session, + #[cfg(feature = "unstable")] + eid: 0, // This is a one shot Publisher + key_expr: self.key_expr?, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }) + } +} + +impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { + type To = ZResult>; +} + +impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { + fn res_sync(self) -> ::To { + let mut key_expr = self.key_expr?; + if !key_expr.is_fully_optimized(&self.session) { + let session_id = self.session.id; + let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); + let prefix_len = key_expr + .len() + .try_into() + .expect("How did you get a key expression with a length over 2^32!?"); + key_expr = match key_expr.0 { + crate::api::key_expr::KeyExprInner::Borrowed(key_expr) + | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. 
} => { + KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire { + key_expr, + expr_id, + mapping: Mapping::Sender, + prefix_len, + session_id, + }) + } + crate::api::key_expr::KeyExprInner::Owned(key_expr) + | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. } => { + KeyExpr(crate::api::key_expr::KeyExprInner::Wire { + key_expr, + expr_id, + mapping: Mapping::Sender, + prefix_len, + session_id, + }) + } + } + } + self.session + .declare_publication_intent(key_expr.clone()) + .res_sync()?; + #[cfg(feature = "unstable")] + let eid = self.session.runtime.next_id(); + let publisher = Publisher { + session: self.session, + #[cfg(feature = "unstable")] + eid, + key_expr, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }; + log::trace!("publish({:?})", publisher.key_expr); + Ok(publisher) + } +} + +impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + fn res_sync(self) -> ::To { + self.publisher.resolve_put( + self.kind.payload, + SampleKind::Put, + self.kind.encoding, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + fn res_sync(self) -> ::To { + self.publisher.resolve_put( + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl AsyncResolve for 
PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 47d3a71c56..b6148ded41 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -57,7 +57,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, api::session::Undeclarable}; +use crate::{api::session::Undeclarable, net::primitives::Primitives, prelude::Selector, Session}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 2b0f813e72..a065685fa1 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -13,6 +13,10 @@ // //! Publishing primitives. +use crate::api::builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, + PublisherPutBuilder, +}; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; @@ -25,226 +29,20 @@ use crate::{ api::handlers::{Callback, DefaultHandler, IntoHandler}, Id, }; +use futures::Sink; +use std::convert::TryFrom; use std::future::Ready; +use std::pin::Pin; +use std::task::{Context, Poll}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +pub use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::push::ext; -use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; use zenoh_protocol::zenoh::Del; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::zenoh::Put; -use zenoh_result::ZResult; - -/// The kind of congestion control. 
-pub use zenoh_protocol::core::CongestionControl; - -#[derive(Debug, Clone)] -pub struct PublicationBuilderPut { - pub(crate) payload: Payload, - pub(crate) encoding: Encoding, -} -#[derive(Debug, Clone)] -pub struct PublicationBuilderDelete; - -/// A builder for initializing [`Session::put`](crate::Session::put), [`Session::delete`](crate::Session::delete), -/// [`Publisher::put`](crate::Publisher::put), and [`Publisher::delete`](crate::Publisher::delete) operations. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// session -/// .put("key/expression", "payload") -/// .encoding(Encoding::TEXT_PLAIN) -/// .congestion_control(CongestionControl::Block) -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug, Clone)] -pub struct PublicationBuilder { - pub(crate) publisher: P, - pub(crate) kind: T, - pub(crate) timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -pub type SessionPutBuilder<'a, 'b> = - PublicationBuilder, PublicationBuilderPut>; - -pub type SessionDeleteBuilder<'a, 'b> = - PublicationBuilder, PublicationBuilderDelete>; - -pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; - -pub type PublisherDeleteBuilder<'a> = - PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; - -impl QoSBuilderTrait for PublicationBuilder, T> { - #[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - publisher: self.publisher.congestion_control(congestion_control), - ..self 
- } - } - #[inline] - fn priority(self, priority: Priority) -> Self { - Self { - publisher: self.publisher.priority(priority), - ..self - } - } - #[inline] - fn express(self, is_express: bool) -> Self { - Self { - publisher: self.publisher.express(is_express), - ..self - } - } -} - -impl PublicationBuilder, T> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self - } -} - -impl

ValueBuilderTrait for PublicationBuilder { - fn encoding>(self, encoding: T) -> Self { - Self { - kind: PublicationBuilderPut { - encoding: encoding.into(), - ..self.kind - }, - ..self - } - } - - fn payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - Self { - kind: PublicationBuilderPut { - payload: payload.into(), - ..self.kind - }, - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - kind: PublicationBuilderPut { payload, encoding }, - ..self - } - } -} - -impl SampleBuilderTrait for PublicationBuilder { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl TimestampBuilderTrait for PublicationBuilder { - fn timestamp>>(self, timestamp: TS) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl Resolvable for PublicationBuilder { - type To = ZResult<()>; -} - -impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { - #[inline] - fn res_sync(self) -> ::To { - let publisher = self.publisher.create_one_shot_publisher()?; - resolve_put( - &publisher, - self.kind.payload, - SampleKind::Put, - self.kind.encoding, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { - #[inline] - fn res_sync(self) -> ::To { - let publisher = self.publisher.create_one_shot_publisher()?; - resolve_put( - &publisher, - Payload::empty(), - SampleKind::Delete, - Encoding::ZENOH_BYTES, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { - type Future = 
Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -use futures::Sink; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::pin::Pin; -use std::task::{Context, Poll}; use zenoh_result::Error; +use zenoh_result::ZResult; #[zenoh_macros::unstable] #[derive(Clone)] @@ -686,54 +484,6 @@ impl Drop for Publisher<'_> { } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - fn res_sync(self) -> ::To { - resolve_put( - self.publisher, - self.kind.payload, - SampleKind::Put, - self.kind.encoding, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - fn res_sync(self) -> ::To { - resolve_put( - self.publisher, - Payload::empty(), - SampleKind::Delete, - Encoding::ZENOH_BYTES, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - impl<'a> Sink for Publisher<'a> { type Error = Error; @@ -752,8 +502,7 @@ impl<'a> Sink for Publisher<'a> { attachment, .. } = item.into(); - resolve_put( - &self, + self.resolve_put( payload, kind, encoding, @@ -776,267 +525,108 @@ impl<'a> Sink for Publisher<'a> { } } -/// A builder for initializing a [`Publisher`]. 
-/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::QoSBuilderTrait; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session -/// .declare_publisher("key/expression") -/// .congestion_control(CongestionControl::Block) -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct PublisherBuilder<'a, 'b: 'a> { - pub(crate) session: SessionRef<'a>, - pub(crate) key_expr: ZResult>, - pub(crate) congestion_control: CongestionControl, - pub(crate) priority: Priority, - pub(crate) is_express: bool, - pub(crate) destination: Locality, -} - -impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { - fn clone(&self) -> Self { - Self { - session: self.session.clone(), - key_expr: match &self.key_expr { - Ok(k) => Ok(k.clone()), - Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), - }, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - } - } -} - -impl QoSBuilderTrait for PublisherBuilder<'_, '_> { - /// Change the `congestion_control` to apply when routing the data. - #[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - congestion_control, - ..self - } - } - - /// Change the priority of the written data. - #[inline] - fn priority(self, priority: Priority) -> Self { - Self { priority, ..self } - } - - /// Change the `express` policy to apply when routing the data. - /// When express is set to `true`, then the message will not be batched. - /// This usually has a positive impact on latency but negative impact on throughput. 
- #[inline] - fn express(self, is_express: bool) -> Self { - Self { is_express, ..self } - } -} - -impl<'a, 'b> PublisherBuilder<'a, 'b> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self - } - - // internal function for perfroming the publication - fn create_one_shot_publisher(self) -> ZResult> { - Ok(Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher - key_expr: self.key_expr?, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - }) - } -} - -impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { - type To = ZResult>; -} - -impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { - fn res_sync(self) -> ::To { - let mut key_expr = self.key_expr?; - if !key_expr.is_fully_optimized(&self.session) { - let session_id = self.session.id; - let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); - let prefix_len = key_expr - .len() - .try_into() - .expect("How did you get a key expression with a length over 2^32!?"); - key_expr = match key_expr.0 { - crate::api::key_expr::KeyExprInner::Borrowed(key_expr) - | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => { - KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire { - key_expr, - expr_id, - mapping: Mapping::Sender, - prefix_len, - session_id, - }) - } - crate::api::key_expr::KeyExprInner::Owned(key_expr) - | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. 
} => { - KeyExpr(crate::api::key_expr::KeyExprInner::Wire { - key_expr, - expr_id, - mapping: Mapping::Sender, - prefix_len, - session_id, - }) - } - } - } - self.session - .declare_publication_intent(key_expr.clone()) - .res_sync()?; - #[cfg(feature = "unstable")] - let eid = self.session.runtime.next_id(); - let publisher = Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid, - key_expr, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, +impl Publisher<'_> { + pub(crate) fn resolve_put( + &self, + payload: Payload, + kind: SampleKind, + encoding: Encoding, + timestamp: Option, + #[cfg(feature = "unstable")] source_info: SourceInfo, + #[cfg(feature = "unstable")] attachment: Option, + ) -> ZResult<()> { + log::trace!("write({:?}, [...])", &self.key_expr); + let primitives = zread!(self.session.state) + .primitives + .as_ref() + .unwrap() + .clone(); + let timestamp = if timestamp.is_none() { + self.session.runtime.new_timestamp() + } else { + timestamp }; - log::trace!("publish({:?})", publisher.key_expr); - Ok(publisher) - } -} - -impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -fn resolve_put( - publisher: &Publisher<'_>, - payload: Payload, - kind: SampleKind, - encoding: Encoding, - timestamp: Option, - #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, -) -> ZResult<()> { - log::trace!("write({:?}, [...])", &publisher.key_expr); - let primitives = zread!(publisher.session.state) - .primitives - .as_ref() - .unwrap() - .clone(); - let timestamp = if timestamp.is_none() { - publisher.session.runtime.new_timestamp() - } else { - timestamp - }; - if publisher.destination != Locality::SessionLocal { - primitives.send_push(Push { - wire_expr: 
publisher.key_expr.to_wire(&publisher.session).to_owned(), - ext_qos: ext::QoSType::new( - publisher.priority.into(), - publisher.congestion_control, - publisher.is_express, - ), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - payload: match kind { - SampleKind::Put => { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); + if self.destination != Locality::SessionLocal { + primitives.send_push(Push { + wire_expr: self.key_expr.to_wire(&self.session).to_owned(), + ext_qos: ext::QoSType::new( + self.priority.into(), + self.congestion_control, + self.is_express, + ), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + payload: match kind { + SampleKind::Put => { + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } } + PushBody::Put(Put { + timestamp, + encoding: encoding.clone().into(), + #[cfg(feature = "unstable")] + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment, + ext_unknown: vec![], + payload: payload.clone().into(), + }) } - PushBody::Put(Put { - timestamp, - encoding: encoding.clone().into(), + SampleKind::Delete => { + #[allow(unused_mut)] + let mut ext_attachment = None; #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment, - ext_unknown: vec![], - payload: payload.clone().into(), - }) - } - SampleKind::Delete => { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); + { + if let Some(attachment) = 
attachment.clone() { + ext_attachment = Some(attachment.into()); + } } + PushBody::Del(Del { + timestamp, + #[cfg(feature = "unstable")] + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, + ext_attachment, + ext_unknown: vec![], + }) } - PushBody::Del(Del { - timestamp, - #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] - ext_sinfo: None, - ext_attachment, - ext_unknown: vec![], - }) - } - }, - }); - } - if publisher.destination != Locality::Remote { - let data_info = DataInfo { - kind, - encoding: Some(encoding), - timestamp, - source_id: None, - source_sn: None, - qos: QoS::from(ext::QoSType::new( - publisher.priority.into(), - publisher.congestion_control, - publisher.is_express, - )), - }; - - publisher.session.handle_data( - true, - &publisher.key_expr.to_wire(&publisher.session), - Some(data_info), - payload.into(), - #[cfg(feature = "unstable")] - attachment, - ); + }, + }); + } + if self.destination != Locality::Remote { + let data_info = DataInfo { + kind, + encoding: Some(encoding), + timestamp, + source_id: None, + source_sn: None, + qos: QoS::from(ext::QoSType::new( + self.priority.into(), + self.congestion_control, + self.is_express, + )), + }; + + self.session.handle_data( + true, + &self.key_expr.to_wire(&self.session), + Some(data_info), + payload.into(), + #[cfg(feature = "unstable")] + attachment, + ); + } + Ok(()) } - Ok(()) } /// The Priority of zenoh messages. 
diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5793794815..61ac272039 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -12,6 +12,9 @@ // ZettaScale Zenoh Team, // use crate::api::admin; +use crate::api::builders::publication::PublicationBuilder; +use crate::api::builders::publication::PublicationBuilderDelete; +use crate::api::builders::publication::PublicationBuilderPut; use crate::api::encoding::Encoding; use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; @@ -21,9 +24,6 @@ use crate::api::key_expr::KeyExprInner; use crate::api::liveliness::{Liveliness, LivelinessTokenState}; use crate::api::publication::MatchingListenerState; use crate::api::publication::MatchingStatus; -use crate::api::publication::PublicationBuilder; -use crate::api::publication::PublicationBuilderDelete; -use crate::api::publication::PublicationBuilderPut; use crate::api::query::GetBuilder; use crate::api::query::QueryState; use crate::api::query::Reply; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3bb919c933..e2cb570a37 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -142,6 +142,8 @@ pub mod key_expr { } pub mod session { + pub use crate::api::builders::publication::SessionDeleteBuilder; + pub use crate::api::builders::publication::SessionPutBuilder; pub use crate::api::session::init; pub use crate::api::session::open; pub use crate::api::session::Session; @@ -159,6 +161,8 @@ pub mod sample { pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; + #[zenoh_macros::unstable] + pub use crate::api::sample::SourceInfo; } pub mod value { @@ -192,10 +196,10 @@ pub mod subscriber { } pub mod publication { + pub use crate::api::builders::publication::PublisherBuilder; pub use crate::api::publication::CongestionControl; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; - pub use 
crate::api::publication::PublisherBuilder; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index a0bec8d9ce..bffd9280b5 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -61,12 +61,12 @@ pub(crate) mod common { pub use crate::api::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::api::sample::builder::{ + pub use crate::api::builders::sample::{ QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; #[zenoh_macros::unstable] - pub use crate::api::sample::builder::SampleBuilderTrait; + pub use crate::api::builders::sample::SampleBuilderTrait; } /// Prelude to import when using Zenoh's sync API. From c0b6751e625dec4e50ab1a1aceba059d280b3a72 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 17:44:28 +0200 Subject: [PATCH 134/598] removed internal typedef Id from root lib.rs --- zenoh/src/api.rs | 2 ++ zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/publication.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/session.rs | 2 +- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 2 -- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 44496822ea..14eb3ef2f2 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,6 +12,8 @@ // ZettaScale Zenoh Team, // +pub(crate) type Id = u32; + pub(crate) mod admin; pub(crate) mod builders; pub(crate) mod encoding; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index dac046324d..77d2d66f46 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -17,7 +17,7 @@ //! 
see [`Liveliness`] use zenoh_protocol::network::request; -use crate::{api::query::Reply, Id}; +use crate::{api::query::Reply, api::Id}; #[zenoh_macros::unstable] use { diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index a065685fa1..39ca9d9f9c 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -27,7 +27,7 @@ use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ api::handlers::{Callback, DefaultHandler, IntoHandler}, - Id, + api::Id, }; use futures::Sink; use std::convert::TryFrom; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index df76b6441f..479f9aee19 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -22,7 +22,7 @@ use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::Id; +use crate::api::Id; #[cfg(feature = "unstable")] use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; use std::fmt; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 61ac272039..3720ea129f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -49,7 +49,7 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::Id; +use crate::api::Id; use crate::Priority; use crate::Sample; use crate::SampleKind; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index c549542b3b..560c37a371 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -18,7 +18,7 @@ use crate::api::key_expr::KeyExpr; use crate::api::sample::Sample; use crate::api::session::Undeclarable; use crate::prelude::Locality; -use crate::Id; +use crate::api::Id; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e2cb570a37..325fa894fb 100644 --- a/zenoh/src/lib.rs +++ 
b/zenoh/src/lib.rs @@ -82,8 +82,6 @@ extern crate zenoh_result; mod api; mod net; -pub(crate) type Id = u32; - use git_version::git_version; #[cfg(feature = "unstable")] use prelude::*; From 35378b32ba33757608844a65beb8a765600d7835 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 18:18:31 +0200 Subject: [PATCH 135/598] keyexpr disabled in prelude --- Cargo.lock | 1 - examples/examples/z_delete.rs | 1 + examples/examples/z_forward.rs | 1 + examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub.rs | 1 + examples/examples/z_pull.rs | 2 +- examples/examples/z_put.rs | 1 + examples/examples/z_put_float.rs | 1 + examples/examples/z_queryable.rs | 1 + examples/examples/z_storage.rs | 1 + examples/examples/z_sub.rs | 1 + plugins/zenoh-backend-example/src/lib.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 6 +++--- plugins/zenoh-plugin-example/src/lib.rs | 1 + .../zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 1 + .../zenoh-plugin-storage-manager/Cargo.toml | 1 - .../zenoh-plugin-storage-manager/src/lib.rs | 1 + .../src/memory_backend/mod.rs | 1 + .../src/replica/align_queryable.rs | 1 + .../src/replica/aligner.rs | 1 + .../src/replica/mod.rs | 1 + .../src/replica/storage.rs | 9 +++++---- zenoh-ext/src/group.rs | 3 +++ zenoh-ext/src/publication_cache.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 1 + zenoh-ext/src/session_ext.rs | 6 ++++-- zenoh/src/api/admin.rs | 5 +++-- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/key_expr.rs | 3 +-- zenoh/src/api/liveliness.rs | 8 ++++---- zenoh/src/api/publication.rs | 2 ++ zenoh/src/api/query.rs | 2 ++ zenoh/src/api/queryable.rs | 3 ++- zenoh/src/api/session.rs | 2 +- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 19 +++++++++++++------ zenoh/src/net/routing/dispatcher/face.rs | 2 +- zenoh/src/net/routing/interceptor/mod.rs | 2 +- zenoh/src/prelude.rs | 2 +- 
zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 1 + 44 files changed, 73 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dff82ad80..75a045d9b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4963,7 +4963,6 @@ dependencies = [ "zenoh", "zenoh-collections", "zenoh-core", - "zenoh-keyexpr", "zenoh-plugin-trait", "zenoh-result", "zenoh-util", diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index a090458c71..7f48f90c96 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 486ccc4fdb..349690c8a8 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a57c937e48..a989b34482 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; +use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index baa5683f62..60f6db0b68 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 8cd3c4edba..176e991fff 100644 --- 
a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 4e44930f4f..3d4ff30e2b 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; +use zenoh::{config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 7b38490507..b6039d09ba 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 33482e4680..86f1ce3c08 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 83ac63ce1f..025f3cc1cc 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index cb2f40c125..50d84001a8 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -17,6 +17,7 @@ use clap::Parser; use futures::select; use 
std::collections::HashMap; use zenoh::config::Config; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index fbce562c2e..f7e232f240 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index f81231a498..3663f3249e 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -14,7 +14,7 @@ use async_std::sync::RwLock; use async_trait::async_trait; use std::collections::{hash_map::Entry, HashMap}; -use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 5ab59ebe45..cfbc1566c8 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -17,7 +17,7 @@ use schemars::JsonSchema; use serde_json::{Map, Value}; use std::convert::TryFrom; use std::time::Duration; -use zenoh::{prelude::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; +use zenoh::{key_expr::keyexpr, key_expr::OwnedKeyExpr, Result as ZResult}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 40d022f1ec..4340c454fa 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -125,10 +125,10 @@ use 
async_trait::async_trait; use const_format::concatcp; -use zenoh::prelude::OwnedKeyExpr; +use zenoh::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh::time::Timestamp; use zenoh::value::Value; -pub use zenoh::Result as ZResult; +use zenoh::Result as ZResult; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; @@ -212,7 +212,7 @@ impl StructVersion for VolumeInstance { } impl PluginControl for VolumeInstance { - fn plugins_status(&self, _names: &zenoh::prelude::keyexpr) -> Vec { + fn plugins_status(&self, _names: &keyexpr) -> Vec { Vec::new() } } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 5615ce68af..9d25f582fb 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -22,6 +22,7 @@ use std::sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, Mutex, }; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 85d730bb41..6a278c4784 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -13,6 +13,7 @@ // use clap::{arg, Command}; use std::time::Duration; +use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 49c58f5074..e2434c644c 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,6 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::payload::StringOrBase64; use 
zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 65b15686f7..fe9359f696 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -49,7 +49,6 @@ urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-collections = { workspace = true } zenoh-core = { workspace = true } -zenoh-keyexpr = { workspace = true } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 78a9814179..7d679ef37d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -27,6 +27,7 @@ use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 4e333b8592..d9f330ea8c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,6 +15,7 @@ use async_std::sync::RwLock; use async_trait::async_trait; use std::collections::HashMap; use std::sync::Arc; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 5908778867..d73b9b2b6d 100644 --- 
a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,6 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 23bf066263..0553710851 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,6 +18,7 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::SampleBuilder; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 77741f43fc..5289fc47af 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,6 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 646aebc837..007b21083b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,6 +23,11 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; +use 
zenoh::key_expr::keyexpr_tree::KeyedSetProvider; +use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; +use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; +use zenoh::key_expr::KeyExpr; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::SampleBuilder; @@ -32,10 +37,6 @@ use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_keyexpr::key_expr::OwnedKeyExpr; -use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; -use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh_result::bail; use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index aa03571f6f..7ede485784 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,9 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use zenoh::key_expr::keyexpr; +use zenoh::key_expr::KeyExpr; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index b8b7c79cec..41766fa1fa 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -15,6 +15,7 @@ use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::session::SessionRef; diff --git 
a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 24501f9eca..6ad417f774 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -18,6 +18,7 @@ use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::SampleBuilder; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 2c9826c98b..2dd0fbd873 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -14,8 +14,10 @@ use super::PublicationCacheBuilder; use std::convert::TryInto; use std::sync::Arc; -use zenoh::prelude::KeyExpr; -use zenoh::session::{Session, SessionRef}; +use zenoh::{ + key_expr::KeyExpr, + session::{Session, SessionRef}, +}; /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 678f6d1bbb..1a5d52fb4c 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -13,10 +13,10 @@ // use crate::{ api::encoding::Encoding, + api::key_expr::KeyExpr, api::queryable::Query, api::sample::DataInfo, - keyexpr, - prelude::sync::{KeyExpr, Locality, SampleKind}, + prelude::sync::{Locality, SampleKind}, Payload, Session, }; use std::{ @@ -25,6 +25,7 @@ use std::{ sync::Arc, }; use zenoh_core::{Result as ZResult, SyncResolve}; +use zenoh_keyexpr::keyexpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 87b2b928ff..abf8e2446a 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -14,6 +14,7 @@ use std::marker::PhantomData; +use 
crate::api::key_expr::KeyExpr; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::QoS; @@ -22,7 +23,6 @@ use crate::api::sample::QoSBuilder; use crate::api::sample::SourceInfo; use crate::api::value::Value; use crate::Encoding; -use crate::KeyExpr; use crate::Payload; use crate::Priority; use crate::Sample; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index b6148ded41..f3a5b90a98 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -49,8 +49,7 @@ use std::{ str::FromStr, }; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -pub use zenoh_keyexpr::*; -pub use zenoh_macros::{kedefine, keformat, kewrite}; +use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 77d2d66f46..7fc830be3d 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,20 +15,18 @@ //! Liveliness primitives. //! //! 
see [`Liveliness`] -use zenoh_protocol::network::request; - -use crate::{api::query::Reply, api::Id}; - #[zenoh_macros::unstable] use { crate::{ api::handlers::locked, api::handlers::DefaultHandler, + api::key_expr::KeyExpr, api::session::SessionRef, api::session::Undeclarable, api::subscriber::{Subscriber, SubscriberInner}, prelude::*, }, + crate::{api::query::Reply, api::Id}, std::convert::TryInto, std::future::Ready, std::sync::Arc, @@ -38,7 +36,9 @@ use { zenoh_core::Resolvable, zenoh_core::Result as ZResult, zenoh_core::SyncResolve, + zenoh_keyexpr::keyexpr, zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, + zenoh_protocol::network::request, }; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 39ca9d9f9c..98064a1b99 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -17,6 +17,7 @@ use crate::api::builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, PublisherPutBuilder, }; +use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; @@ -35,6 +36,7 @@ use std::future::Ready; use std::pin::Pin; use std::task::{Context, Poll}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_keyexpr::keyexpr; pub use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Push; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 1e749132da..ed9bff7776 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -14,6 +14,7 @@ //! Query primitives. 
use crate::api::handlers::{locked, Callback, DefaultHandler}; +use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; @@ -23,6 +24,7 @@ use std::collections::HashMap; use std::future::Ready; use std::time::Duration; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 479f9aee19..e46dab3c49 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -14,15 +14,16 @@ //! Queryable primitives. +use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; use crate::api::sample::{QoSBuilder, SourceInfo}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; +use crate::api::Id; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::api::Id; #[cfg(feature = "unstable")] use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; use std::fmt; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3720ea129f..59fdcf78e7 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -41,6 +41,7 @@ use crate::api::selector::TIME_RANGE_KEY; use crate::api::subscriber::SubscriberBuilder; use crate::api::subscriber::SubscriberState; use crate::api::value::Value; +use crate::api::Id; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; @@ -49,7 +50,6 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::api::Id; use crate::Priority; use crate::Sample; use crate::SampleKind; diff --git a/zenoh/src/api/subscriber.rs 
b/zenoh/src/api/subscriber.rs index 560c37a371..0c1303e638 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -17,8 +17,8 @@ use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::sample::Sample; use crate::api::session::Undeclarable; -use crate::prelude::Locality; use crate::api::Id; +use crate::prelude::Locality; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 325fa894fb..055be82543 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -126,15 +126,22 @@ pub mod buffers { } pub mod key_expr { - pub use crate::api::key_expr::kedefine; - pub use crate::api::key_expr::keformat; - pub use crate::api::key_expr::keyexpr; - pub use crate::api::key_expr::OwnedKeyExpr; + pub mod keyexpr_tree { + pub use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; + pub use zenoh_keyexpr::keyexpr_tree::{ + support::NonWild, support::UnknownWildness, KeBoxTree, + }; + pub use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; + } + pub use crate::api::key_expr::KeyExpr; + pub use zenoh_keyexpr::keyexpr; + pub use zenoh_keyexpr::OwnedKeyExpr; + pub use zenoh_macros::{kedefine, keformat, kewrite}; // keyexpr format macro support pub mod format { - pub use crate::api::key_expr::format::*; + pub use zenoh_keyexpr::format::*; pub mod macro_support { - pub use crate::api::key_expr::format::macro_support::*; + pub use zenoh_keyexpr::format::macro_support::*; } } } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index cb565053c9..7ac6e10995 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -14,9 +14,9 @@ use super::super::router::*; use super::tables::TablesLock; use super::{resource::*, tables}; +use crate::api::key_expr::KeyExpr; use crate::net::primitives::{McastMux, Mux, 
Primitives}; use crate::net::routing::interceptor::{InterceptorTrait, InterceptorsChain}; -use crate::KeyExpr; use std::any::Any; use std::collections::HashMap; use std::fmt; diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 9dfc03ac7e..23d90c30c7 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -18,7 +18,7 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use super::RoutingContext; -use crate::KeyExpr; +use crate::api::key_expr::KeyExpr; use std::any::Any; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index bffd9280b5..6e27fc3d6d 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -24,7 +24,7 @@ pub use common::*; pub(crate) mod common { - pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; + // pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; pub use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 8c2d2e9937..35f3dac2c6 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -14,6 +14,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f34704fb7e..0033ed6468 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -15,6 +15,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; From 142ca2fef862d0650b2279b6436b3e7a49e58842 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 18:22:54 +0200 Subject: [PATCH 136/598] resolve disabled in prelude 
--- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/queryable.rs | 2 +- zenoh/src/prelude.rs | 12 ++++++------ 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 41766fa1fa..e1a974b6ff 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,7 +20,7 @@ use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::session::SessionRef; use zenoh::subscriber::FlumeSubscriber; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_result::{bail, ZResult}; use zenoh_util::core::ResolveFuture; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 6ad417f774..bdcab8f220 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -26,7 +26,7 @@ use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; -use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{zlock, AsyncResolve, Resolvable, Resolve, SyncResolve}; use crate::ExtractSample; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 7fc830be3d..51ff59e5cc 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -40,6 +40,7 @@ use { zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, zenoh_protocol::network::request, }; +use {zenoh_core::Resolve}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e46dab3c49..37afb900aa 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -31,7 +31,7 
@@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ core::{EntityId, WireExpr}, network::{response, Mapping, RequestId, Response, ResponseFinal}, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 6e27fc3d6d..d26c146d4c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -25,12 +25,12 @@ pub use common::*; pub(crate) mod common { // pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; - pub use zenoh_buffers::{ - buffer::{Buffer, SplitBuffer}, - reader::HasReader, - writer::HasWriter, - }; - pub use zenoh_core::Resolve; + // pub use zenoh_buffers::{ + // buffer::{Buffer, SplitBuffer}, + // reader::HasReader, + // writer::HasWriter, + // }; + // pub use zenoh_core::Resolve; pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; #[zenoh_macros::unstable] From 62d8e7854a9ff9343c2fdb0ecb633e2e937d6d21 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 15:41:54 +0200 Subject: [PATCH 137/598] allowed build zenoh without unstable feature set --- zenoh/src/publication.rs | 1 + zenoh/src/query.rs | 2 ++ zenoh/src/queryable.rs | 12 ++++++++++-- zenoh/src/sample/builder.rs | 1 + zenoh/src/sample/mod.rs | 8 ++++---- zenoh/src/session.rs | 6 +++++- zenoh/src/subscriber.rs | 3 --- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index c176ad32e0..4f31c73a24 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -157,6 +157,7 @@ impl

ValueBuilderTrait for PublicationBuilder { } } +#[zenoh_macros::unstable] impl SampleBuilderTrait for PublicationBuilder { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index cb1116130d..3a380bd1c9 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -133,6 +133,7 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) source_info: SourceInfo, } +#[zenoh_macros::unstable] impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { @@ -430,6 +431,7 @@ where self.value, #[cfg(feature = "unstable")] self.attachment, + #[cfg(feature = "unstable")] self.source_info, callback, ) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6fbb4e9090..0ad3a36c07 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,12 +18,15 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::{QoSBuilder, SourceInfo}; +use crate::sample::builder::SampleBuilder; +use crate::sample::QoSBuilder; +#[cfg(feature = "unstable")] +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::builder::SampleBuilder, sample::Attachment}; +use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -155,7 +158,9 @@ impl Query { encoding: Encoding::default(), }, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -193,7 +198,9 @@ impl Query { qos: response::ext::QoSType::RESPONSE.into(), kind: ReplyBuilderDelete, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ 
-298,6 +305,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index fca55edd09..bad35024ef 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -163,6 +163,7 @@ impl TimestampBuilderTrait for SampleBuilder { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 6e457578a3..0ef8462d2a 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -22,9 +22,9 @@ use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; -use zenoh_protocol::{core::CongestionControl, zenoh}; pub mod builder; @@ -178,12 +178,12 @@ impl SourceInfo { } #[zenoh_macros::unstable] -impl From for Option { - fn from(source_info: SourceInfo) -> Option { +impl From for Option { + fn from(source_info: SourceInfo) -> Option { if source_info.is_empty() { None } else { - Some(zenoh::put::ext::SourceInfoType { + Some(zenoh_protocol::zenoh::put::ext::SourceInfoType { id: source_info.source_id.unwrap_or_default(), sn: source_info.source_sn.unwrap_or_default() as u32, }) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index f694eb6420..181976dcb0 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -61,6 +61,8 @@ use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; +#[cfg(feature = "unstable")] +use 
zenoh_protocol::network::ext; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::reply::ReplyBody; @@ -77,7 +79,6 @@ use zenoh_protocol::{ subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, request::{self, ext::TargetType, Request}, Mapping, Push, Response, ResponseFinal, }, @@ -1687,7 +1688,10 @@ impl Session { payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, parameters: selector.parameters().to_string(), + #[cfg(feature = "unstable")] ext_sinfo: source.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..47d41ebb1f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -202,9 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] From e1cb5df7451aceaaecad7e5cd4ec5454aef03ed0 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 16:15:48 +0200 Subject: [PATCH 138/598] unfinished - priority resolving problem --- zenoh/src/api.rs | 2 +- zenoh/src/api/admin.rs | 9 +++------ zenoh/src/api/builders/publication.rs | 7 +++---- zenoh/src/api/builders/sample.rs | 10 +++++----- zenoh/src/api/key_expr.rs | 7 +++++-- zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/query.rs | 3 ++- zenoh/src/api/queryable.rs | 10 +++++++++- zenoh/src/api/sample.rs | 2 +- zenoh/src/api/session.rs | 15 +++++++++------ zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 6 ++++++ zenoh/src/prelude.rs | 4 ++-- 13 files changed, 48 insertions(+), 31 
deletions(-) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 14eb3ef2f2..ab38844ea6 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -20,9 +20,9 @@ pub(crate) mod encoding; pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; +#[cfg(feature = "unstable")] pub(crate) mod liveliness; pub(crate) mod payload; -#[cfg(feature = "unstable")] pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 1a5d52fb4c..917afdc18f 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,13 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +use crate::api::sample::Locality; use crate::{ - api::encoding::Encoding, - api::key_expr::KeyExpr, - api::queryable::Query, - api::sample::DataInfo, - prelude::sync::{Locality, SampleKind}, - Payload, Session, + api::encoding::Encoding, api::key_expr::KeyExpr, api::payload::Payload, api::queryable::Query, + api::sample::DataInfo, api::sample::SampleKind, api::session::Session, }; use std::{ collections::hash_map::DefaultHasher, diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 50a8c6ab42..cead1427f0 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -16,16 +16,15 @@ use std::future::Ready; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; +#[cfg(feature = "unstable")] +use crate::api::sample::Attachment; use crate::api::sample::Locality; use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::value::Value; -use crate::api::{ - encoding::Encoding, payload::Payload, publication::Publisher, sample::Attachment, -}; +use crate::api::{encoding::Encoding, 
payload::Payload, publication::Publisher}; use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::Mapping; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index c6ec22d8a3..e89a13606b 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -14,20 +14,20 @@ use std::marker::PhantomData; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::QoS; use crate::api::sample::QoSBuilder; +use crate::api::sample::Sample; +use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::value::Value; -use crate::Encoding; -use crate::Payload; -use crate::Priority; -use crate::Sample; -use crate::SampleKind; use uhlc::Timestamp; +use zenoh_config::Priority; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index a2edc8085c..bed9fd5b95 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -56,7 +56,10 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{api::session::Undeclarable, net::primitives::Primitives, prelude::Selector, Session}; +use crate::{ + api::session::Session, api::session::Undeclarable, net::primitives::Primitives, + prelude::Selector, +}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { @@ -552,7 +555,7 @@ impl<'a> KeyExpr<'a> { _ => false, } } - pub(crate) fn to_wire(&'a self, session: &crate::Session) -> WireExpr<'a> { + pub(crate) fn to_wire(&'a self, session: &Session) -> WireExpr<'a> { match &self.0 { KeyExprInner::Wire { key_expr, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 51ff59e5cc..1f7d03ddca 100644 --- a/zenoh/src/api/liveliness.rs +++ 
b/zenoh/src/api/liveliness.rs @@ -15,6 +15,7 @@ //! Liveliness primitives. //! //! see [`Liveliness`] +use zenoh_core::Resolve; #[zenoh_macros::unstable] use { crate::{ @@ -40,7 +41,6 @@ use { zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, zenoh_protocol::network::request, }; -use {zenoh_core::Resolve}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index daefe6a59e..d5314bb204 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -18,11 +18,12 @@ use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; +use crate::api::session::Session; use crate::prelude::*; -use crate::Session; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; +use zenoh_config::Priority; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::ZResult; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 37afb900aa..f8f58e5e66 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -18,7 +18,9 @@ use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; -use crate::api::sample::{QoSBuilder, SourceInfo}; +use crate::api::sample::QoSBuilder; +#[cfg(feature = "unstable")] +use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::api::Id; @@ -31,6 +33,7 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; +use zenoh_config::Priority; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ core::{EntityId, WireExpr}, @@ -145,7 +148,9 @@ impl Query { encoding: Encoding::default(), }, timestamp: None, + 
#[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -183,7 +188,9 @@ impl Query { qos: response::ext::QoSType::RESPONSE.into(), kind: ReplyBuilderDelete, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -284,6 +291,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 121780006c..8475e11ddf 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -18,12 +18,12 @@ use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::value::Value; use crate::payload::Payload; -use crate::Priority; #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_config::Priority; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::Timestamp; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 54ea6a08d5..3e82bcd16e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -22,7 +22,10 @@ use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::api::liveliness::{Liveliness, LivelinessTokenState}; +use crate::api::payload::Payload; +#[zenoh_macros::unstable] use crate::api::publication::MatchingListenerState; +#[zenoh_macros::unstable] use crate::api::publication::MatchingStatus; use crate::api::query::GetBuilder; use crate::api::query::QueryState; @@ -34,7 +37,12 @@ use crate::api::queryable::QueryableState; use crate::api::sample::Attachment; use crate::api::sample::DataInfo; 
use crate::api::sample::DataInfoIntoSample; +use crate::api::sample::Locality; use crate::api::sample::QoS; +use crate::api::sample::Sample; +use crate::api::sample::SampleKind; +#[cfg(feature = "unstable")] +use crate::api::sample::SourceInfo; use crate::api::selector::Parameters; use crate::api::selector::Selector; use crate::api::selector::TIME_RANGE_KEY; @@ -45,15 +53,9 @@ use crate::api::Id; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; -use crate::payload::Payload; -use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::Priority; -use crate::Sample; -use crate::SampleKind; -use crate::SourceInfo; use log::{error, trace, warn}; use std::collections::HashMap; use std::convert::TryFrom; @@ -71,6 +73,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_config::Notifier; +use zenoh_config::Priority; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 735f582cf2..39e863b1fa 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -15,10 +15,10 @@ //! Subscribing primitives. 
use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::sample::Locality; use crate::api::sample::Sample; use crate::api::session::Undeclarable; use crate::api::Id; -use crate::prelude::Locality; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 055be82543..c83c3bb598 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -149,6 +149,7 @@ pub mod key_expr { pub mod session { pub use crate::api::builders::publication::SessionDeleteBuilder; pub use crate::api::builders::publication::SessionPutBuilder; + #[zenoh_macros::unstable] pub use crate::api::session::init; pub use crate::api::session::open; pub use crate::api::session::Session; @@ -162,7 +163,9 @@ pub mod sample { pub use crate::api::builders::sample::SampleBuilderTrait; pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; + #[zenoh_macros::unstable] pub use crate::api::sample::Attachment; + #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; @@ -212,7 +215,9 @@ pub mod publication { pub mod query { pub use crate::api::query::Mode; pub use crate::api::query::Reply; + #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; + #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; } @@ -236,6 +241,7 @@ pub mod scouting { pub use crate::api::scouting::WhatAmI; } +#[cfg(feature = "unstable")] pub mod liveliness { pub use crate::api::liveliness::Liveliness; pub use crate::api::liveliness::LivelinessSubscriberBuilder; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d26c146d4c..e8c7e8a3b4 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -50,11 +50,11 
@@ pub(crate) mod common { #[zenoh_macros::unstable] pub use crate::api::sample::Locality; + #[cfg(not(feature = "unstable"))] + pub(crate) use crate::api::sample::Locality; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; pub use crate::api::sample::{Sample, SampleKind}; - #[cfg(not(feature = "unstable"))] - pub(crate) use crate::sample::Locality; pub use crate::api::publication::Priority; #[zenoh_macros::unstable] From 22ece82e0dff4f691bba8e9e235ad0b4007b07e6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 16:23:11 +0200 Subject: [PATCH 139/598] priority import fix --- zenoh/src/api/builders/publication.rs | 1 + zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/sample.rs | 2 +- zenoh/src/api/session.rs | 2 +- 6 files changed, 6 insertions(+), 5 deletions(-) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index cead1427f0..20ceb086f9 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -13,6 +13,7 @@ use std::future::Ready; // Contributors: // ZettaScale Zenoh Team, // +use crate::api::publication::Priority; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index e89a13606b..838d67010e 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -26,8 +26,8 @@ use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::value::Value; +use crate::api::publication::Priority; use uhlc::Timestamp; -use zenoh_config::Priority; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d5314bb204..59fd9cc9e8 
100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -23,7 +23,7 @@ use crate::prelude::*; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::ZResult; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index f8f58e5e66..e1391968f0 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -33,7 +33,7 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ core::{EntityId, WireExpr}, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 8475e11ddf..6c0a49646e 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -23,7 +23,7 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::Timestamp; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3e82bcd16e..9221233262 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -73,7 +73,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_config::Notifier; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; From 84d42f4d0177e2a1690755345119a84ecec8a2f3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 
Apr 2024 16:41:46 +0200 Subject: [PATCH 140/598] priority, endpoint fixes --- examples/examples/z_info.rs | 1 + zenoh/src/api/builders/publication.rs | 2 +- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/query.rs | 3 ++- zenoh/src/api/queryable.rs | 4 ++-- zenoh/src/api/sample.rs | 8 +++----- zenoh/src/api/session.rs | 2 +- zenoh/src/lib.rs | 4 ++-- zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/matching.rs | 1 + zenoh/tests/unicity.rs | 1 + zenohd/src/main.rs | 3 ++- 12 files changed, 18 insertions(+), 15 deletions(-) diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 1d047f9454..6a919d8d38 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; +use zenoh::config::ZenohId; #[tokio::main] async fn main() { diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 20ceb086f9..eb60021dbd 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -13,10 +13,10 @@ use std::future::Ready; // Contributors: // ZettaScale Zenoh Team, // -use crate::api::publication::Priority; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::Locality; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 838d67010e..2af1a0a71c 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -17,6 +17,7 @@ use std::marker::PhantomData; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; +use crate::api::publication::Priority; #[cfg(feature = "unstable")] use 
crate::api::sample::Attachment; use crate::api::sample::QoS; @@ -26,7 +27,6 @@ use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::value::Value; -use crate::api::publication::Priority; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 59fd9cc9e8..e9bedbe2e5 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -15,6 +15,7 @@ //! Query primitives. use crate::api::handlers::{locked, Callback, DefaultHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; @@ -23,9 +24,9 @@ use crate::prelude::*; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; -use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; +use zenoh_protocol::core::ZenohId; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e1391968f0..590b88bfa5 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,11 +13,11 @@ // //! Queryable primitives. 
- use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; +use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; @@ -33,8 +33,8 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; -use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_protocol::core::ZenohId; use zenoh_protocol::{ core::{EntityId, WireExpr}, network::{response, Mapping, RequestId, Response, ResponseFinal}, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 6c0a49646e..359af2a436 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -16,6 +16,7 @@ use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; use crate::api::value::Value; use crate::payload::Payload; #[zenoh_macros::unstable] @@ -23,7 +24,6 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use crate::api::publication::Priority; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::Timestamp; @@ -154,10 +154,8 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] fn source_info_stack_size() { - use crate::{ - api::sample::{SourceInfo, SourceSn}, - ZenohId, - }; + use crate::api::sample::{SourceInfo, SourceSn}; + use zenoh_protocol::core::ZenohId; assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 9221233262..ae0593790e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -27,6 +27,7 @@ use 
crate::api::payload::Payload; use crate::api::publication::MatchingListenerState; #[zenoh_macros::unstable] use crate::api::publication::MatchingStatus; +use crate::api::publication::Priority; use crate::api::query::GetBuilder; use crate::api::query::QueryState; use crate::api::query::Reply; @@ -73,7 +74,6 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_config::Notifier; -use crate::api::publication::Priority; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c83c3bb598..0fd2c1b0f9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -258,8 +258,8 @@ pub mod runtime { pub mod config { pub use zenoh_config::{ - client, default, peer, Config, ModeDependentValue, PermissionsConf, PluginLoad, - ValidatedMap, + client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, + PluginLoad, ValidatedMap, ZenohId, }; } diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 0a3ed01ce7..a62becfaa9 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,4 +1,4 @@ -use zenoh_config::{ConnectionRetryConf, ValidatedMap}; +use zenoh_config::{ConnectionRetryConf, ValidatedMap, EndPoint}; use zenoh::prelude::sync::*; diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index e56036f5de..6dd6835b7e 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; use zenoh_result::ZResult as Result; +use zenoh::config::Locator; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 0033ed6468..374f773ff5 100644 --- a/zenoh/tests/unicity.rs +++ 
b/zenoh/tests/unicity.rs @@ -18,6 +18,7 @@ use tokio::runtime::Handle; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; +use zenoh::config::EndPoint; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 4faa10534c..781fc308df 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -15,10 +15,11 @@ use clap::Parser; use futures::future; use git_version::git_version; use std::collections::HashSet; +use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; use zenoh::plugins::PluginsManager; -use zenoh::prelude::{EndPoint, WhatAmI}; use zenoh::runtime::{AdminSpace, Runtime}; +use zenoh::scouting::WhatAmI; use zenoh::Result; const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); From 314e708c7cd829e97ddaf07b14e85084f6016eed Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:33:06 +0200 Subject: [PATCH 141/598] relude endpoint commented --- zenoh/src/lib.rs | 2 -- zenoh/src/prelude.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 0fd2c1b0f9..c6b06259ec 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -83,8 +83,6 @@ mod api; mod net; use git_version::git_version; -#[cfg(feature = "unstable")] -use prelude::*; use zenoh_util::concat_enabled_features; /// A zenoh error. 
diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e8c7e8a3b4..7087270c42 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -32,7 +32,7 @@ pub(crate) mod common { // }; // pub use zenoh_core::Resolve; - pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; + // pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; #[zenoh_macros::unstable] pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; From 0eafd2bc04027fd3d0fd8975e2bc694ad504c2c3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:40:56 +0200 Subject: [PATCH 142/598] entityid commented in prelude --- examples/examples/z_get_liveliness.rs | 1 + examples/examples/z_liveliness.rs | 1 + examples/examples/z_pub_shm.rs | 1 + examples/examples/z_sub_liveliness.rs | 1 + zenoh/src/api/publication.rs | 4 ++++ zenoh/src/api/queryable.rs | 4 +++- zenoh/src/prelude.rs | 4 ++-- 7 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 487f3c25d6..5e6fd06c84 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 937868e091..2a93f50db8 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 2aadcf33de..bc239ebf41 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use 
zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 50ba40c7ac..690299dbeb 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 98064a1b99..08f4586a1b 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -38,6 +38,10 @@ use std::task::{Context, Poll}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_keyexpr::keyexpr; pub use zenoh_protocol::core::CongestionControl; +#[zenoh_macros::unstable] +use zenoh_protocol::core::EntityGlobalId; +#[zenoh_macros::unstable] +use zenoh_protocol::core::EntityId; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Push; use zenoh_protocol::zenoh::Del; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 590b88bfa5..38ee61d5ff 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,10 +13,10 @@ // //! Queryable primitives. 
-use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; +use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] @@ -34,6 +34,8 @@ use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +#[zenoh_macros::unstable] +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::ZenohId; use zenoh_protocol::{ core::{EntityId, WireExpr}, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 7087270c42..112127271c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -33,8 +33,8 @@ pub(crate) mod common { // pub use zenoh_core::Resolve; // pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; - #[zenoh_macros::unstable] - pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; + // #[zenoh_macros::unstable] + // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; pub use crate::config::{self, Config}; pub use crate::handlers::IntoHandler; From cfaef46a53cc721bebab2787346786ad453c58ad Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:52:58 +0200 Subject: [PATCH 143/598] config commented in prelude --- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/operations.rs | 4 ++-- plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 4 ++-- zenoh/src/api/publication.rs | 2 ++ zenoh/src/prelude.rs | 2 +- zenoh/tests/attachments.rs | 2 ++ zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/events.rs | 3 ++- zenoh/tests/handler.rs | 2 ++ zenoh/tests/interceptors.rs | 2 +- zenoh/tests/liveliness.rs | 1 + zenoh/tests/matching.rs | 9 +++++---- zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 3 ++- 14 files changed, 25 insertions(+), 13 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs 
b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 6a278c4784..366c6b7638 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -13,6 +13,7 @@ // use clap::{arg, Command}; use std::time::Duration; +use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index d3c6207496..1d16ec23ea 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,12 +20,12 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; -use zenoh::config::ValidatedMap; +use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::session::Session; -use zenoh::{prelude::Config, time::Timestamp}; +use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 1b40a83cd5..4d8e72d55f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,12 +21,12 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; -use zenoh::config::ValidatedMap; +use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::session::Session; -use zenoh::{prelude::Config, time::Timestamp}; +use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 08f4586a1b..9a80366ce0 100644 --- 
a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1090,6 +1090,8 @@ impl Drop for MatchingListenerInner<'_> { #[cfg(test)] mod tests { + use zenoh_config::Config; + #[test] fn priority_from() { use super::Priority as APrio; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 112127271c..fe4d02825a 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -36,7 +36,7 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - pub use crate::config::{self, Config}; + // pub use crate::config::{self, Config}; pub use crate::handlers::IntoHandler; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 7580984c8d..967397ea99 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -15,6 +15,7 @@ #[test] fn pubsub() { use zenoh::prelude::sync::*; + use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -62,6 +63,7 @@ fn pubsub() { #[test] fn queries() { use zenoh::{prelude::sync::*, sample::Attachment, sample::SampleBuilderTrait}; + use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index a62becfaa9..675b4eb879 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,4 +1,4 @@ -use zenoh_config::{ConnectionRetryConf, ValidatedMap, EndPoint}; +use zenoh_config::{Config, ConnectionRetryConf, EndPoint, ValidatedMap}; use zenoh::prelude::sync::*; diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 201f4941f9..380b3fcfbb 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -14,12 +14,13 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh_config::peer; 
use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(10); async fn open_session(listen: &[&str], connect: &[&str]) -> Session { - let mut config = config::peer(); + let mut config = peer(); config.listen.endpoints = listen .iter() .map(|e| e.parse().unwrap()) diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index ceed15e2c3..8330c454ad 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,3 +1,5 @@ +use zenoh_config::Config; + // // Copyright (c) 2024 ZettaScale Technology // diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index a6eff19ec9..ecb6724e22 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; -use zenoh_config::ValidatedMap; +use zenoh_config::{Config, ValidatedMap}; use zenoh_core::zlock; struct IntervalCounter { diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0e2870d808..4762c5cf91 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -13,6 +13,7 @@ // use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh_config as config; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 6dd6835b7e..c678c423d0 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -13,10 +13,12 @@ // use std::str::FromStr; use std::time::Duration; +use zenoh::config::Locator; use zenoh::prelude::r#async::*; +use zenoh_config as config; +use zenoh_config::peer; use zenoh_core::ztimeout; use zenoh_result::ZResult as Result; -use zenoh::config::Locator; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); @@ -104,10 +106,9 @@ async fn zenoh_matching_status_any() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> Result<()> { use 
flume::RecvTimeoutError; + let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_remote_test") diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 35f3dac2c6..bae00d37f2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh_config as config; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 374f773ff5..6ba59ef242 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -15,10 +15,11 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; +use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh_config as config; use zenoh_core::ztimeout; -use zenoh::config::EndPoint; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From af656d50810aec1ebfe24f2be186ef25c0abd457 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:59:55 +0200 Subject: [PATCH 144/598] into handler removed --- zenoh-ext/src/querying_subscriber.rs | 6 +++--- zenoh/src/api/liveliness.rs | 2 ++ zenoh/src/api/publication.rs | 2 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 4 ++-- zenoh/src/api/scouting.rs | 10 +++++----- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/prelude.rs | 2 +- 8 files changed, 16 insertions(+), 14 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 
bdcab8f220..90491e58b9 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,7 +17,7 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; -use zenoh::handlers::{locked, DefaultHandler}; +use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; @@ -107,7 +107,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle handler: Handler, ) -> QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> where - Handler: zenoh::prelude::IntoHandler<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let QueryingSubscriberBuilder { session, @@ -464,7 +464,7 @@ where handler: Handler, ) -> FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: zenoh::prelude::IntoHandler<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let FetchingSubscriberBuilder { session, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 1f7d03ddca..2177a6ce5b 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,7 +15,9 @@ //! Liveliness primitives. //! //! 
see [`Liveliness`] +use crate::api::handlers::IntoHandler; use zenoh_core::Resolve; + #[zenoh_macros::unstable] use { crate::{ diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 9a80366ce0..3f3fedc3c4 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -875,7 +875,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> where - Handler: crate::prelude::IntoHandler<'static, MatchingStatus>, + Handler: IntoHandler<'static, MatchingStatus>, { let MatchingListenerBuilder { publisher, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e9bedbe2e5..e8729e3803 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,7 +13,7 @@ // //! Query primitives. -use crate::api::handlers::{locked, Callback, DefaultHandler}; +use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 38ee61d5ff..f1ac55413d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -15,7 +15,7 @@ //! Queryable primitives. 
use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; -use crate::api::handlers::{locked, DefaultHandler}; +use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; @@ -755,7 +755,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoHandler<'static, Query>, + Handler: IntoHandler<'static, Query>, { let QueryableBuilder { session, diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 56f8d4c1a4..bcc1482f1b 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::handlers::{locked, Callback, DefaultHandler}; +use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; @@ -139,7 +139,7 @@ impl ScoutBuilder { #[inline] pub fn with(self, handler: Handler) -> ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello>, + Handler: IntoHandler<'static, Hello>, { let ScoutBuilder { what, @@ -156,7 +156,7 @@ impl ScoutBuilder { impl Resolvable for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { type To = ZResult>; @@ -164,7 +164,7 @@ where impl SyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { fn res_sync(self) -> ::To { @@ -175,7 +175,7 @@ where impl AsyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler: IntoHandler<'static, Hello> + Send, 
Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 39e863b1fa..34df5569f4 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -309,7 +309,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoHandler<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let SubscriberBuilder { session, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index fe4d02825a..2ba784d06f 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -37,7 +37,7 @@ pub(crate) mod common { // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; // pub use crate::config::{self, Config}; - pub use crate::handlers::IntoHandler; + // pub use crate::handlers::IntoHandler; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; From 045823959b7e1e90337a23bcb608e17518d7a18d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 12:18:29 +0200 Subject: [PATCH 145/598] api.rs to mod.rs --- zenoh/src/{api.rs => api/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{api.rs => api/mod.rs} (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api/mod.rs similarity index 100% rename from zenoh/src/api.rs rename to zenoh/src/api/mod.rs From 4dfb118a09dc987747053890d72bc5edaedfd1f4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 12:39:45 +0200 Subject: [PATCH 146/598] parameters commented in prelude --- examples/examples/z_get.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 1 + .../src/replica/align_queryable.rs | 1 + zenoh-ext/src/publication_cache.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 1 + zenoh/src/api/key_expr.rs | 4 ++-- zenoh/src/api/plugins.rs | 2 +- zenoh/src/api/query.rs | 1 + 
zenoh/src/api/queryable.rs | 2 ++ zenoh/src/net/runtime/adminspace.rs | 4 ++-- zenoh/src/prelude.rs | 2 +- 12 files changed, 15 insertions(+), 7 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 8735ae8daa..4ac31df3e4 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::selector::Selector; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e2434c644c..a9dbdce912 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::selector::TIME_RANGE_KEY; +use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 7d679ef37d..12fbede21d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -31,6 +31,7 @@ use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; +use zenoh::selector::Selector; use zenoh::session::Session; use zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index d73b9b2b6d..89769bea1c 100644 --- 
a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,6 +21,7 @@ use std::str::FromStr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::time::Timestamp; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index e1a974b6ff..ba1491aa6f 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -18,6 +18,7 @@ use std::future::Ready; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::selector::Parameters; use zenoh::session::SessionRef; use zenoh::subscriber::FlumeSubscriber; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 90491e58b9..8b98483de3 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -22,6 +22,7 @@ use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::SampleBuilder; +use zenoh::selector::Selector; use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index bed9fd5b95..1f381486a0 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -57,8 +57,8 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use crate::{ - api::session::Session, api::session::Undeclarable, net::primitives::Primitives, - prelude::Selector, + api::selector::Selector, api::session::Session, api::session::Undeclarable, + net::primitives::Primitives, }; #[derive(Clone, Debug)] diff --git 
a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 3684324cf2..36cde5ba34 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -14,7 +14,7 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). -use crate::{net::runtime::Runtime, prelude::Selector}; +use crate::{api::selector::Selector, net::runtime::Runtime}; use zenoh_core::zconfigurable; use zenoh_plugin_trait::{ diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e8729e3803..76ee714828 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -19,6 +19,7 @@ use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; +use crate::api::selector::Selector; use crate::api::session::Session; use crate::prelude::*; use std::collections::HashMap; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index f1ac55413d..7319575d89 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -21,6 +21,8 @@ use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; +use crate::api::selector::Parameters; +use crate::api::selector::Selector; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::api::Id; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index a87fea5f08..be992621e6 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -530,7 +530,7 @@ fn router_data(context: &AdminContext, query: Query) { }); #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) + let stats = crate::api::selector::Parameters::decode(&query.selector()) .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); if stats { 
json.as_object_mut().unwrap().insert( @@ -561,7 +561,7 @@ fn router_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) + let stats = crate::api::selector::Parameters::decode(&query.selector()) .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2ba784d06f..265f4f2c48 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -38,7 +38,7 @@ pub(crate) mod common { // pub use crate::config::{self, Config}; // pub use crate::handlers::IntoHandler; - pub use crate::selector::{Parameter, Parameters, Selector}; + // pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; From 2f0c7b405f883cf926c969a5a56cc6d9b673737b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:02:25 +0200 Subject: [PATCH 147/598] session commented in api --- examples/examples/z_forward.rs | 1 + examples/examples/z_info.rs | 3 ++- examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub.rs | 1 + examples/examples/z_pub_thr.rs | 1 + examples/examples/z_pull.rs | 5 ++++- examples/examples/z_queryable.rs | 1 + examples/examples/z_storage.rs | 1 + examples/examples/z_sub.rs | 1 + examples/examples/z_sub_thr.rs | 1 + plugins/zenoh-plugin-example/src/lib.rs | 1 + plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 1 + plugins/zenoh-plugin-storage-manager/src/replica/mod.rs | 1 + .../zenoh-plugin-storage-manager/src/replica/storage.rs | 1 + zenoh-ext/examples/z_query_sub.rs | 1 + zenoh-ext/src/group.rs | 1 + zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api/liveliness.rs | 2 
+- zenoh/src/api/publication.rs | 2 ++ zenoh/src/prelude.rs | 2 +- zenoh/tests/attachments.rs | 8 ++++++-- zenoh/tests/events.rs | 1 + zenoh/tests/handler.rs | 1 + zenoh/tests/interceptors.rs | 1 + zenoh/tests/liveliness.rs | 2 ++ zenoh/tests/matching.rs | 4 ++++ zenoh/tests/routing.rs | 1 + zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 1 + 33 files changed, 47 insertions(+), 9 deletions(-) diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 349690c8a8..a4c3cb4ced 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 6a919d8d38..c63e5974e9 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -13,9 +13,10 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::config::ZenohId; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; -use zenoh::config::ZenohId; #[tokio::main] async fn main() { diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a989b34482..6070fb1e94 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -17,6 +17,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 60f6db0b68..7446456938 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -16,6 +16,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use 
zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 176e991fff..10209b04e6 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 4354ad2e68..b23e3ce1bd 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,6 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 3d4ff30e2b..6a07de8358 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,10 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*}; +use zenoh::{ + config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*, + session::SessionDeclarations, +}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 025f3cc1cc..ac58d9f094 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 50d84001a8..4c6ed0ede5 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -19,6 
+19,7 @@ use std::collections::HashMap; use zenoh::config::Config; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index f7e232f240..df77429356 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 0a8426edf0..b4b6ecd0e5 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::Instant; use zenoh::config::Config; use zenoh::prelude::sync::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; struct Stats { diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 9d25f582fb..a1e2af6574 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -26,6 +26,7 @@ use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::session::SessionDeclarations; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 366c6b7638..28627999f3 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -17,6 +17,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use 
zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; const HTML: &str = r#"

diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index a9dbdce912..c69f83794b 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -36,7 +36,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; -use zenoh::session::Session; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 89769bea1c..792b0f351f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -23,6 +23,7 @@ use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::selector::Selector; use zenoh::session::Session; +use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; pub struct AlignQueryable { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 5289fc47af..c24d9b1fef 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -30,6 +30,7 @@ use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::session::Session; +use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 007b21083b..88b21f0a0c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs 
+++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -32,6 +32,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; +use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index d88519789b..61ea0eac92 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -16,6 +16,7 @@ use clap::Command; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::query::ReplyKeyExpr; +use zenoh::session::SessionDeclarations; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 7ede485784..5e9b9e66f3 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -33,6 +33,7 @@ use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; use zenoh::session::Session; +use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh_result::bail; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index ba1491aa6f..85d3157d3c 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -19,7 +19,7 @@ use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::selector::Parameters; -use zenoh::session::SessionRef; +use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_result::{bail, ZResult}; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 8b98483de3..e58786628f 100644 --- 
a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -23,7 +23,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::SampleBuilder; use zenoh::selector::Selector; -use zenoh::session::SessionRef; +use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 2177a6ce5b..634aedd1fb 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -16,8 +16,8 @@ //! //! see [`Liveliness`] use crate::api::handlers::IntoHandler; +use crate::api::session::Session; use zenoh_core::Resolve; - #[zenoh_macros::unstable] use { crate::{ diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 3f3fedc3c4..db90c1dc22 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1092,6 +1092,8 @@ impl Drop for MatchingListenerInner<'_> { mod tests { use zenoh_config::Config; + use crate::api::session::SessionDeclarations; + #[test] fn priority_from() { use super::Priority as APrio; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 265f4f2c48..9863f42a5c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -39,7 +39,7 @@ pub(crate) mod common { // pub use crate::config::{self, Config}; // pub use crate::handlers::IntoHandler; // pub use crate::selector::{Parameter, Parameters, Selector}; - pub use crate::session::{Session, SessionDeclarations}; + // pub use crate::session::{Session, SessionDeclarations}; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 967397ea99..073d5537bd 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] 
#[test] fn pubsub() { - use zenoh::prelude::sync::*; + use zenoh::{prelude::sync::*, session::SessionDeclarations}; use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); @@ -62,7 +62,11 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment, sample::SampleBuilderTrait}; + use zenoh::{ + prelude::sync::*, + sample::{Attachment, SampleBuilderTrait}, + session::SessionDeclarations, + }; use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 380b3fcfbb..aafdfdf7d5 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -14,6 +14,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_config::peer; use zenoh_core::ztimeout; diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 8330c454ad..fdb8e225fa 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,3 +1,4 @@ +use zenoh::session::SessionDeclarations; use zenoh_config::Config; // diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index ecb6724e22..1ff1f49651 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; +use zenoh::session::SessionDeclarations; use zenoh_config::{Config, ValidatedMap}; use zenoh_core::zlock; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 4762c5cf91..0af0b64164 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -22,6 +22,8 @@ const SLEEP: Duration = Duration::from_secs(1); #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { + use zenoh::session::SessionDeclarations; + let mut c1 = config::peer(); c1.listen 
.set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index c678c423d0..304af977d2 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -15,6 +15,7 @@ use std::str::FromStr; use std::time::Duration; use zenoh::config::Locator; use zenoh::prelude::r#async::*; +use zenoh::session::Session; use zenoh_config as config; use zenoh_config::peer; use zenoh_core::ztimeout; @@ -45,6 +46,7 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_any() -> Result<()> { use flume::RecvTimeoutError; + use zenoh::session::SessionDeclarations; let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; @@ -106,6 +108,7 @@ async fn zenoh_matching_status_any() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> Result<()> { use flume::RecvTimeoutError; + use zenoh::session::SessionDeclarations; let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); @@ -169,6 +172,7 @@ async fn zenoh_matching_status_remote() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> Result<()> { use flume::RecvTimeoutError; + use zenoh::session::SessionDeclarations; let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 9803d62c4e..b0d789312a 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -19,6 +19,7 @@ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; use zenoh::sample::QoSBuilderTrait; +use zenoh::session::{Session, SessionDeclarations}; use zenoh::Result; use zenoh_core::ztimeout; use 
zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index bae00d37f2..54337729ae 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 6ba59ef242..f37f6cb852 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -18,6 +18,7 @@ use tokio::runtime::Handle; use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; From bd717db83cc2f9d1cb051090db8de904348bf0c9 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:05:57 +0200 Subject: [PATCH 148/598] query commented in prelude --- examples/examples/z_get.rs | 1 + zenoh/src/api/liveliness.rs | 2 ++ zenoh/src/prelude.rs | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 4ac31df3e4..6326ddf6c6 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::query::QueryTarget; use zenoh::selector::Selector; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 634aedd1fb..17cb2246b0 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -16,8 +16,10 @@ //! //! 
see [`Liveliness`] use crate::api::handlers::IntoHandler; +use crate::api::query::{QueryConsolidation, QueryTarget}; use crate::api::session::Session; use zenoh_core::Resolve; + #[zenoh_macros::unstable] use { crate::{ diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 9863f42a5c..14ccee0252 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -41,7 +41,7 @@ pub(crate) mod common { // pub use crate::selector::{Parameter, Parameters, Selector}; // pub use crate::session::{Session, SessionDeclarations}; - pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; From 42fd23fb2ca416eef028aee0db8f039d87e3df90 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:08:59 +0200 Subject: [PATCH 149/598] encoding commented in prelude --- plugins/zenoh-plugin-rest/src/lib.rs | 1 + zenoh/src/api/publication.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/prelude.rs | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c69f83794b..5ca7169dfd 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,6 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index db90c1dc22..6a52e3bde2 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -17,6 +17,7 @@ use crate::api::builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, 
PublisherPutBuilder, }; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 76ee714828..20ebfd2f70 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,6 +13,7 @@ // //! Query primitives. +use crate::api::encoding::Encoding; use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 14ccee0252..53eed06f1f 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,7 +43,7 @@ pub(crate) mod common { // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - pub use crate::api::encoding::Encoding; + // pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; /// The encoding of a zenoh `Value`. pub use crate::payload::{Deserialize, Payload, Serialize}; From 0faa5c21c3098f456cbf395a6ce073ceaffd58ac Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:15:08 +0200 Subject: [PATCH 150/598] value commented in prelude --- plugins/zenoh-plugin-rest/src/lib.rs | 1 + plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/align_queryable.rs | 1 + plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/prelude.rs | 2 +- zenoh/tests/session.rs | 1 + 8 files changed, 8 insertions(+), 2 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 5ca7169dfd..0b855a54fc 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -38,6 +38,7 @@ use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; 
use zenoh::session::{Session, SessionDeclarations}; +use zenoh::value::Value; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index d9f330ea8c..0924279cb2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -16,8 +16,8 @@ use async_trait::async_trait; use std::collections::HashMap; use std::sync::Arc; use zenoh::key_expr::OwnedKeyExpr; -use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; +use zenoh::value::Value; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; use zenoh_backend_traits::*; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 792b0f351f..00d198bef6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -25,6 +25,7 @@ use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; +use zenoh::value::Value; pub struct AlignQueryable { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 0553710851..0d750c8810 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -24,6 +24,7 @@ use zenoh::prelude::r#async::*; use zenoh::sample::SampleBuilder; use zenoh::session::Session; use zenoh::time::Timestamp; +use zenoh::value::Value; pub struct Aligner { session: Arc, diff --git a/zenoh/src/api/query.rs 
b/zenoh/src/api/query.rs index 20ebfd2f70..5f881053a2 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -22,6 +22,7 @@ use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; use crate::api::selector::Selector; use crate::api::session::Session; +use crate::api::value::Value; use crate::prelude::*; use std::collections::HashMap; use std::future::Ready; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 7319575d89..db41c6a815 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -25,6 +25,7 @@ use crate::api::selector::Parameters; use crate::api::selector::Selector; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; +use crate::api::value::Value; use crate::api::Id; use crate::net::primitives::Primitives; use crate::prelude::*; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 53eed06f1f..75f96ea1ed 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -44,7 +44,7 @@ pub(crate) mod common { // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; // pub use crate::api::encoding::Encoding; - pub use crate::api::value::Value; + // pub use crate::api::value::Value; /// The encoding of a zenoh `Value`. 
pub use crate::payload::{Deserialize, Payload, Serialize}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 54337729ae..f36775bca2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,6 +17,7 @@ use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::session::{Session, SessionDeclarations}; +use zenoh::value::Value; use zenoh_config as config; use zenoh_core::ztimeout; From 33e3d7eaecac8531d1f651e5f67f9cd6a0ceb8b4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:17:40 +0200 Subject: [PATCH 151/598] payload commented in prelude --- examples/examples/z_ping.rs | 1 + examples/examples/z_pub_thr.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh/src/api/publication.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/prelude.rs | 2 +- 7 files changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 6070fb1e94..ef9bc08617 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::key_expr::keyexpr; +use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index b23e3ce1bd..75e2d72fbd 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::convert::TryInto; +use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 0b855a54fc..83085fd449 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -31,7 +31,7 @@ use 
tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::payload::StringOrBase64; +use zenoh::payload::{Payload, StringOrBase64}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 6a52e3bde2..83f813eb84 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -19,6 +19,7 @@ use crate::api::builders::publication::{ }; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 5f881053a2..bc1cacd769 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -16,6 +16,7 @@ use crate::api::encoding::Encoding; use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index db41c6a815..3ab8264beb 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -17,6 +17,7 @@ use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 75f96ea1ed..443ec47545 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -46,7 +46,7 @@ pub(crate) mod common 
{ // pub use crate::api::encoding::Encoding; // pub use crate::api::value::Value; /// The encoding of a zenoh `Value`. - pub use crate::payload::{Deserialize, Payload, Serialize}; + // pub use crate::payload::{Deserialize, Payload, Serialize}; #[zenoh_macros::unstable] pub use crate::api::sample::Locality; From 0ededb147fed2bef0baabb146bf765844b0622b2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:30:06 +0200 Subject: [PATCH 152/598] locality commented in prelude --- plugins/zenoh-plugin-storage-manager/src/replica/mod.rs | 1 + zenoh-ext/src/publication_cache.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/publication.rs | 4 +++- zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/prelude.rs | 6 ++---- zenoh/tests/matching.rs | 6 +++--- 9 files changed, 14 insertions(+), 9 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index c24d9b1fef..7192e3ab7e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -29,6 +29,7 @@ use urlencoding::encode; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample::Locality; use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 85d3157d3c..733509f619 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -18,6 +18,7 @@ use std::future::Ready; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::sample::Locality; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; diff --git 
a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e58786628f..23ba054ded 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -21,7 +21,7 @@ use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::SampleBuilder; +use zenoh::sample::{Locality, SampleBuilder}; use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 17cb2246b0..5c151315ab 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -17,6 +17,7 @@ //! see [`Liveliness`] use crate::api::handlers::IntoHandler; use crate::api::query::{QueryConsolidation, QueryTarget}; +use crate::api::sample::Locality; use crate::api::session::Session; use zenoh_core::Resolve; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 83f813eb84..3d9df2b5bb 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -22,11 +22,13 @@ use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; #[zenoh_macros::unstable] use crate::api::sample::Attachment; +use crate::api::sample::Locality; +#[zenoh_macros::unstable] +use crate::api::sample::SourceInfo; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::net::primitives::Primitives; -use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ api::handlers::{Callback, DefaultHandler, IntoHandler}, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index bc1cacd769..38e8a12b88 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -20,6 +20,7 @@ use crate::api::payload::Payload; use 
crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; +use crate::api::sample::Locality; use crate::api::sample::QoSBuilder; use crate::api::selector::Selector; use crate::api::session::Session; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3ab8264beb..bec6b5e29b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -19,6 +19,7 @@ use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; use crate::api::publication::Priority; +use crate::api::sample::Locality; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 443ec47545..66b1b07adf 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -48,10 +48,8 @@ pub(crate) mod common { /// The encoding of a zenoh `Value`. // pub use crate::payload::{Deserialize, Payload, Serialize}; - #[zenoh_macros::unstable] - pub use crate::api::sample::Locality; - #[cfg(not(feature = "unstable"))] - pub(crate) use crate::api::sample::Locality; + // #[zenoh_macros::unstable] + // pub use crate::api::sample::Locality; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; pub use crate::api::sample::{Sample, SampleKind}; diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 304af977d2..b22d8dd1c8 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -46,7 +46,7 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_any() -> Result<()> { use flume::RecvTimeoutError; - use zenoh::session::SessionDeclarations; + use zenoh::{sample::Locality, session::SessionDeclarations}; let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; @@ -108,7 +108,7 @@ async fn zenoh_matching_status_any() -> 
Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> Result<()> { use flume::RecvTimeoutError; - use zenoh::session::SessionDeclarations; + use zenoh::{sample::Locality, session::SessionDeclarations}; let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); @@ -172,7 +172,7 @@ async fn zenoh_matching_status_remote() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> Result<()> { use flume::RecvTimeoutError; - use zenoh::session::SessionDeclarations; + use zenoh::{sample::Locality, session::SessionDeclarations}; let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); From dd6c1dd009c9f47d4abb4d7145ca98b93d907743 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:40:38 +0200 Subject: [PATCH 153/598] sourceinfo commented out --- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/prelude.rs | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5c151315ab..ad4cd3f7ec 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -18,6 +18,7 @@ use crate::api::handlers::IntoHandler; use crate::api::query::{QueryConsolidation, QueryTarget}; use crate::api::sample::Locality; +use crate::api::sample::SourceInfo; use crate::api::session::Session; use zenoh_core::Resolve; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 38e8a12b88..5ef4c14ce7 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -22,6 +22,7 @@ use crate::api::publication::Priority; use crate::api::sample::Attachment; use crate::api::sample::Locality; use crate::api::sample::QoSBuilder; +use crate::api::sample::SourceInfo; use crate::api::selector::Selector; use crate::api::session::Session; use 
crate::api::value::Value; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 66b1b07adf..77f4087d3f 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -50,8 +50,8 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use crate::api::sample::Locality; - #[zenoh_macros::unstable] - pub use crate::api::sample::SourceInfo; + // #[zenoh_macros::unstable] + // pub use crate::api::sample::SourceInfo; pub use crate::api::sample::{Sample, SampleKind}; pub use crate::api::publication::Priority; From 9c1a1f9a0be7adfbe92adb96fa225c2f69e47c66 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:31:05 +0200 Subject: [PATCH 154/598] relative use --- zenoh/src/api/admin.rs | 12 ++-- zenoh/src/api/encoding.rs | 2 +- zenoh/src/api/handlers.rs | 2 +- zenoh/src/api/info.rs | 2 +- zenoh/src/api/key_expr.rs | 10 +-- zenoh/src/api/liveliness.rs | 45 ++++--------- zenoh/src/api/plugins.rs | 4 +- zenoh/src/api/publication.rs | 64 +++++++++--------- zenoh/src/api/query.rs | 35 +++++----- zenoh/src/api/queryable.rs | 54 +++++++-------- zenoh/src/api/sample.rs | 25 ++++--- zenoh/src/api/scouting.rs | 8 +-- zenoh/src/api/selector.rs | 10 ++- zenoh/src/api/session.rs | 125 ++++++++++++++--------------------- zenoh/src/api/subscriber.rs | 31 ++++----- zenoh/src/api/time.rs | 1 - zenoh/src/api/value.rs | 3 +- zenoh/src/lib.rs | 10 ++- 18 files changed, 200 insertions(+), 243 deletions(-) diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 917afdc18f..74c913b419 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,10 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::sample::Locality; -use crate::{ - api::encoding::Encoding, api::key_expr::KeyExpr, api::payload::Payload, api::queryable::Query, - api::sample::DataInfo, api::sample::SampleKind, api::session::Session, +use super::{ + encoding::Encoding, + key_expr::KeyExpr, + payload::Payload, + queryable::Query, + sample::Locality, + 
sample::{DataInfo, SampleKind}, + session::Session, }; use std::{ collections::hash_map::DefaultHasher, diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index d9fa725ed5..7518671eed 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::payload::Payload; +use super::payload::Payload; use phf::phf_map; use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; diff --git a/zenoh/src/api/handlers.rs b/zenoh/src/api/handlers.rs index 6aecda34b9..7610fe43d8 100644 --- a/zenoh/src/api/handlers.rs +++ b/zenoh/src/api/handlers.rs @@ -13,7 +13,7 @@ // //! Callback handler trait. -use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; +use super::session::API_DATA_RECEPTION_CHANNEL_SIZE; use std::sync::{Arc, Mutex, Weak}; use zenoh_collections::RingBuffer as RingBufferInner; use zenoh_result::ZResult; diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 1f7a903ba4..dbcad9c50c 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -13,7 +13,7 @@ // //! Tools to access information about the current zenoh [`Session`](crate::Session). -use crate::api::session::SessionRef; +use super::session::SessionRef; use std::future::Ready; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 1f381486a0..0eb7515181 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -43,6 +43,11 @@ //! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, //! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. 
+use super::{ + selector::Selector, + session::{Session, Undeclarable}, +}; +use crate::net::primitives::Primitives; use std::{ convert::{TryFrom, TryInto}, future::Ready, @@ -56,11 +61,6 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{ - api::selector::Selector, api::session::Session, api::session::Undeclarable, - net::primitives::Primitives, -}; - #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { Borrowed(&'a keyexpr), diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index ad4cd3f7ec..c2a075ee52 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,38 +15,21 @@ //! Liveliness primitives. //! //! see [`Liveliness`] -use crate::api::handlers::IntoHandler; -use crate::api::query::{QueryConsolidation, QueryTarget}; -use crate::api::sample::Locality; -use crate::api::sample::SourceInfo; -use crate::api::session::Session; -use zenoh_core::Resolve; - -#[zenoh_macros::unstable] -use { - crate::{ - api::handlers::locked, - api::handlers::DefaultHandler, - api::key_expr::KeyExpr, - api::session::SessionRef, - api::session::Undeclarable, - api::subscriber::{Subscriber, SubscriberInner}, - prelude::*, - }, - crate::{api::query::Reply, api::Id}, - std::convert::TryInto, - std::future::Ready, - std::sync::Arc, - std::time::Duration, - zenoh_config::unwrap_or_default, - zenoh_core::AsyncResolve, - zenoh_core::Resolvable, - zenoh_core::Result as ZResult, - zenoh_core::SyncResolve, - zenoh_keyexpr::keyexpr, - zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, - zenoh_protocol::network::request, +use super::{ + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + query::{QueryConsolidation, QueryTarget, Reply}, + sample::{Locality, Sample, SourceInfo}, + session::{Session, SessionRef, Undeclarable}, + subscriber::{Subscriber, SubscriberInner}, + Id, }; +use std::{convert::TryInto, future::Ready, sync::Arc, time::Duration}; +use zenoh_config::unwrap_or_default; +use 
zenoh_core::Resolve; +use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 36cde5ba34..23f8b2b811 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -14,9 +14,9 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). -use crate::{api::selector::Selector, net::runtime::Runtime}; +use super::selector::Selector; +use crate::net::runtime::Runtime; use zenoh_core::zconfigurable; - use zenoh_plugin_trait::{ Plugin, PluginControl, PluginInstance, PluginReport, PluginStatusRec, StructVersion, }; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 3d9df2b5bb..87ba666dda 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -13,46 +13,42 @@ // //! Publishing primitives. 
-use crate::api::builders::publication::{ - PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, - PublisherPutBuilder, +use super::{ + builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, + PublisherDeleteBuilder, PublisherPutBuilder, + }, + encoding::Encoding, + key_expr::KeyExpr, + payload::Payload, + sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, + session::{SessionRef, Undeclarable}, }; -use crate::api::encoding::Encoding; -use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; -#[zenoh_macros::unstable] -use crate::api::sample::Attachment; -use crate::api::sample::Locality; -#[zenoh_macros::unstable] -use crate::api::sample::SourceInfo; -use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; -use crate::api::session::SessionRef; -use crate::api::session::Undeclarable; use crate::net::primitives::Primitives; -#[cfg(feature = "unstable")] -use crate::{ - api::handlers::{Callback, DefaultHandler, IntoHandler}, - api::Id, -}; use futures::Sink; -use std::convert::TryFrom; -use std::future::Ready; -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + convert::TryFrom, + future::Ready, + pin::Pin, + task::{Context, Poll}, +}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_keyexpr::keyexpr; -pub use zenoh_protocol::core::CongestionControl; -#[zenoh_macros::unstable] -use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::{ + core::CongestionControl, + network::{push::ext, Push}, + zenoh::{Del, PushBody, Put}, +}; +use zenoh_result::{Error, ZResult}; + #[zenoh_macros::unstable] -use zenoh_protocol::core::EntityId; -use zenoh_protocol::network::push::ext; -use zenoh_protocol::network::Push; -use zenoh_protocol::zenoh::Del; -use zenoh_protocol::zenoh::PushBody; -use zenoh_protocol::zenoh::Put; -use zenoh_result::Error; -use zenoh_result::ZResult; +use { + 
crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, + crate::api::sample::{Attachment, SourceInfo}, + crate::api::Id, + zenoh_protocol::core::EntityGlobalId, + zenoh_protocol::core::EntityId, +}; #[zenoh_macros::unstable] #[derive(Clone)] diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 5ef4c14ce7..567bfe2c64 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,28 +13,27 @@ // //! Query primitives. -use crate::api::encoding::Encoding; -use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; -use crate::api::publication::Priority; -#[zenoh_macros::unstable] -use crate::api::sample::Attachment; -use crate::api::sample::Locality; -use crate::api::sample::QoSBuilder; -use crate::api::sample::SourceInfo; -use crate::api::selector::Selector; -use crate::api::session::Session; -use crate::api::value::Value; -use crate::prelude::*; -use std::collections::HashMap; -use std::future::Ready; -use std::time::Duration; +use super::{ + builders::sample::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}, + encoding::Encoding, + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + payload::Payload, + publication::Priority, + sample::{Locality, QoSBuilder, Sample, SourceInfo}, + selector::Selector, + session::Session, + value::Value, +}; +use std::{collections::HashMap, future::Ready, time::Duration}; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::ZenohId; +use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; +#[zenoh_macros::unstable] +use crate::api::sample::Attachment; + /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). 
pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index bec6b5e29b..a60adfe74f 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,42 +13,42 @@ // //! Queryable primitives. -use crate::api::builders::sample::SampleBuilder; -use crate::api::encoding::Encoding; -use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; -use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; -use crate::api::publication::Priority; -use crate::api::sample::Locality; -use crate::api::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::api::sample::SourceInfo; -use crate::api::selector::Parameters; -use crate::api::selector::Selector; -use crate::api::session::SessionRef; -use crate::api::session::Undeclarable; -use crate::api::value::Value; -use crate::api::Id; +use super::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + ValueBuilderTrait, + }, + encoding::Encoding, + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + payload::Payload, + publication::Priority, + sample::{Locality, QoSBuilder, Sample, SampleKind}, + selector::{Parameters, Selector}, + session::{SessionRef, Undeclarable}, + value::Value, + Id, +}; use crate::net::primitives::Primitives; -use crate::prelude::*; -#[cfg(feature = "unstable")] -use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; -use std::fmt; -use std::future::Ready; -use std::ops::Deref; -use std::sync::Arc; +use std::{fmt, future::Ready, ops::Deref, sync::Arc}; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -#[zenoh_macros::unstable] -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::core::ZenohId; use zenoh_protocol::{ - core::{EntityId, WireExpr}, + core::{CongestionControl, EntityId, WireExpr, ZenohId}, network::{response, Mapping, RequestId, Response, 
ResponseFinal}, zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; use zenoh_result::ZResult; +#[zenoh_macros::unstable] +use { + super::{ + query::ReplyKeyExpr, + sample::{Attachment, SourceInfo}, + }, + zenoh_protocol::core::EntityGlobalId, +}; + pub(crate) struct QueryInner { /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 359af2a436..a51264e1a4 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,21 +13,24 @@ // //! Sample primitives -use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; -use crate::api::encoding::Encoding; -use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; -use crate::api::value::Value; -use crate::payload::Payload; +use super::{ + builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, + encoding::Encoding, + key_expr::KeyExpr, + payload::Payload, + publication::Priority, + value::Value, +}; +use std::{convert::TryFrom, fmt}; +use zenoh_protocol::{ + core::{CongestionControl, EntityGlobalId, Timestamp}, + network::declare::ext::QoSType, +}; + #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::core::Timestamp; -use zenoh_protocol::network::declare::ext::QoSType; pub type SourceSn = u64; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index bcc1482f1b..c15e9955a3 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -17,15 +17,9 @@ use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::core::WhatAmIMatcher; +use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; 
use zenoh_result::ZResult; -/// Constants and helpers for zenoh `whatami` flags. -pub use zenoh_protocol::core::WhatAmI; - -/// A zenoh Hello message. -pub use zenoh_protocol::scouting::Hello; - /// A builder for initializing a [`Scout`]. /// /// # Examples diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 51b8296634..144b4ee8a0 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -14,12 +14,7 @@ //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - -use crate::{api::key_expr::KeyExpr, api::queryable::Query}; - +use super::{key_expr::KeyExpr, queryable::Query}; use std::{ borrow::{Borrow, Cow}, collections::HashMap, @@ -27,6 +22,9 @@ use std::{ hash::Hash, str::FromStr, }; +use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_result::ZResult; +pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index ae0593790e..880bf4405f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -11,85 +11,50 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::admin; -use crate::api::builders::publication::PublicationBuilder; -use crate::api::builders::publication::PublicationBuilderDelete; -use crate::api::builders::publication::PublicationBuilderPut; -use crate::api::encoding::Encoding; -use crate::api::handlers::{Callback, DefaultHandler}; -use crate::api::info::SessionInfo; -use crate::api::key_expr::KeyExpr; -use crate::api::key_expr::KeyExprInner; -#[zenoh_macros::unstable] -use crate::api::liveliness::{Liveliness, 
LivelinessTokenState}; -use crate::api::payload::Payload; -#[zenoh_macros::unstable] -use crate::api::publication::MatchingListenerState; -#[zenoh_macros::unstable] -use crate::api::publication::MatchingStatus; -use crate::api::publication::Priority; -use crate::api::query::GetBuilder; -use crate::api::query::QueryState; -use crate::api::query::Reply; -use crate::api::queryable::Query; -use crate::api::queryable::QueryInner; -use crate::api::queryable::QueryableState; -#[cfg(feature = "unstable")] -use crate::api::sample::Attachment; -use crate::api::sample::DataInfo; -use crate::api::sample::DataInfoIntoSample; -use crate::api::sample::Locality; -use crate::api::sample::QoS; -use crate::api::sample::Sample; -use crate::api::sample::SampleKind; -#[cfg(feature = "unstable")] -use crate::api::sample::SourceInfo; -use crate::api::selector::Parameters; -use crate::api::selector::Selector; -use crate::api::selector::TIME_RANGE_KEY; -use crate::api::subscriber::SubscriberBuilder; -use crate::api::subscriber::SubscriberState; -use crate::api::value::Value; -use crate::api::Id; -use crate::net::primitives::Primitives; -use crate::net::routing::dispatcher::face::Face; -use crate::net::runtime::Runtime; -use crate::publication::*; -use crate::query::*; -use crate::queryable::*; +use super::{ + admin, + builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + }, + encoding::Encoding, + handlers::{Callback, DefaultHandler}, + info::SessionInfo, + key_expr::{KeyExpr, KeyExprInner}, + payload::Payload, + publication::{Priority, Publisher}, + query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, + sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, + selector::{Parameters, Selector, TIME_RANGE_KEY}, + subscriber::{SubscriberBuilder, SubscriberState}, + value::Value, + Id, +}; +use 
crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; use log::{error, trace, warn}; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::fmt; -use std::future::Ready; -use std::ops::Deref; -use std::sync::atomic::{AtomicU16, Ordering}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + fmt, + future::Ready, + ops::Deref, + sync::{ + atomic::{AtomicU16, Ordering}, + Arc, RwLock, + }, + time::Duration, +}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; -use zenoh_config::unwrap_or_default; -use zenoh_config::Config; -use zenoh_config::Notifier; -use zenoh_core::Resolvable; -use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; -use zenoh_protocol::core::Reliability; -#[cfg(feature = "unstable")] -use zenoh_protocol::network::declare::SubscriberId; -#[cfg(feature = "unstable")] -use zenoh_protocol::network::ext; -use zenoh_protocol::network::AtomicRequestId; -use zenoh_protocol::network::RequestId; -use zenoh_protocol::zenoh::reply::ReplyBody; -use zenoh_protocol::zenoh::Del; -use zenoh_protocol::zenoh::Put; +use zenoh_config::{unwrap_or_default, Config, Notifier}; +use zenoh_core::{ + zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, SyncResolve, +}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, ExprId, Reliability, WireExpr, ZenohId, EMPTY_EXPR_ID, }, network::{ declare::{ @@ -98,16 +63,28 @@ use zenoh_protocol::{ DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, request::{self, ext::TargetType, Request}, - Mapping, Push, Response, ResponseFinal, + AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, }, zenoh::{ query::{self, 
ext::QueryBodyType, Consolidation}, - PushBody, RequestBody, ResponseBody, + reply::ReplyBody, + Del, PushBody, Put, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; use zenoh_util::core::AsyncResolve; +#[zenoh_macros::unstable] +use { + super::{ + liveliness::{Liveliness, LivelinessTokenState}, + publication::{MatchingListenerState, MatchingStatus}, + sample::{Attachment, SourceInfo}, + }, + zenoh_protocol::network::declare::SubscriberId, + zenoh_protocol::network::ext, +}; + zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; pub(crate) static ref API_QUERY_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 34df5569f4..7ad0160ae3 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -13,24 +13,25 @@ // //! Subscribing primitives. -use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::api::key_expr::KeyExpr; -use crate::api::sample::Locality; -use crate::api::sample::Sample; -use crate::api::session::Undeclarable; -use crate::api::Id; -use crate::{api::session::SessionRef, Result as ZResult}; -use std::fmt; -use std::future::Ready; -use std::ops::{Deref, DerefMut}; -use std::sync::Arc; +use super::{ + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + sample::{Locality, Sample}, + session::{SessionRef, Undeclarable}, + Id, +}; +use std::{ + fmt, + future::Ready, + ops::{Deref, DerefMut}, + sync::Arc, +}; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; +use zenoh_result::ZResult; + #[cfg(feature = "unstable")] use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; - -/// The kind of reliability. 
-pub use zenoh_protocol::core::Reliability; pub(crate) struct SubscriberState { pub(crate) id: Id, diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index cbdabe3a7e..5d0d06765d 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use std::convert::TryFrom; - use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a reception [`Timestamp`] with id=0x01. diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index f75abd4241..60586ad040 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -13,8 +13,7 @@ // //! Value primitives. -use crate::api::builders::sample::ValueBuilderTrait; -use crate::{api::encoding::Encoding, payload::Payload}; +use super::{builders::sample::ValueBuilderTrait, encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c6b06259ec..c6e890cb27 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -196,18 +196,19 @@ pub mod selector { pub mod subscriber { pub use crate::api::subscriber::FlumeSubscriber; - pub use crate::api::subscriber::Reliability; pub use crate::api::subscriber::Subscriber; pub use crate::api::subscriber::SubscriberBuilder; + /// The kind of reliability. 
+ pub use zenoh_protocol::core::Reliability; } pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; - pub use crate::api::publication::CongestionControl; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; + pub use zenoh_protocol::core::CongestionControl; } pub mod query { @@ -236,7 +237,10 @@ pub mod handlers { pub mod scouting { pub use crate::api::scouting::scout; pub use crate::api::scouting::ScoutBuilder; - pub use crate::api::scouting::WhatAmI; + /// Constants and helpers for zenoh `whatami` flags. + pub use zenoh_protocol::core::WhatAmI; + /// A zenoh Hello message. + pub use zenoh_protocol::scouting::Hello; } #[cfg(feature = "unstable")] From d0f87047d943b02138467f9ac7e9531bb20d301e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:38:39 +0200 Subject: [PATCH 155/598] sample commented in prelude --- examples/examples/z_storage.rs | 1 + plugins/zenoh-plugin-example/src/lib.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 1 + .../src/replica/align_queryable.rs | 1 + plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 1 + zenoh-ext/src/publication_cache.rs | 3 +-- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 2 +- zenoh/src/api/publication.rs | 6 +++--- zenoh/src/prelude.rs | 3 +-- zenoh/tests/events.rs | 1 + zenoh/tests/liveliness.rs | 2 +- zenoh/tests/session.rs | 1 + 15 files changed, 17 insertions(+), 11 deletions(-) diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 4c6ed0ede5..07f54ce5ff 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use zenoh::config::Config; use zenoh::key_expr::{keyexpr, KeyExpr}; use 
zenoh::prelude::r#async::*; +use zenoh::sample::{Sample, SampleKind}; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index a1e2af6574..6e9d0a917d 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -26,6 +26,7 @@ use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::sample::Sample; use zenoh::session::SessionDeclarations; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 83085fd449..6465d74efd 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -36,6 +36,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; +use zenoh::sample::{Sample, SampleKind}; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 00d198bef6..e33b0c519d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,6 +21,7 @@ use std::str::FromStr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample::Sample; use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 0d750c8810..466b415d73 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::SampleBuilder; +use zenoh::sample::{Sample, SampleBuilder}; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 1d16ec23ea..bd612bec87 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -24,6 +24,7 @@ use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 4d8e72d55f..054be54ce8 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -25,6 +25,7 @@ use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 733509f619..25311ca647 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -16,9 +16,8 @@ use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; use 
zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; -use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; -use zenoh::sample::Locality; +use zenoh::sample::{Locality, Sample}; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 23ba054ded..34b0ee9bb5 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -21,7 +21,7 @@ use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::{Locality, SampleBuilder}; +use zenoh::sample::{Locality, Sample, SampleBuilder}; use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 7d77fac05b..c758f910c2 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -19,8 +19,8 @@ use zenoh::sample::Locality; use zenoh::Result as ZResult; use zenoh::{ liveliness::LivelinessSubscriberBuilder, - prelude::Sample, query::{QueryConsolidation, QueryTarget}, + sample::Sample, subscriber::{Reliability, Subscriber, SubscriberBuilder}, }; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 87ba666dda..2c3d21eec4 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1090,9 +1090,9 @@ impl Drop for MatchingListenerInner<'_> { #[cfg(test)] mod tests { + use crate::api::{sample::SampleKind, session::SessionDeclarations}; use zenoh_config::Config; - - use crate::api::session::SessionDeclarations; + use zenoh_core::SyncResolve; #[test] fn priority_from() { @@ -1120,7 +1120,7 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() 
{ - use crate::{api::session::open, prelude::sync::*}; + use crate::api::session::open; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 77f4087d3f..0ca952bc16 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -52,8 +52,7 @@ pub(crate) mod common { // pub use crate::api::sample::Locality; // #[zenoh_macros::unstable] // pub use crate::api::sample::SourceInfo; - pub use crate::api::sample::{Sample, SampleKind}; - + // pub use crate::api::sample::{Sample, SampleKind}; pub use crate::api::publication::Priority; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index aafdfdf7d5..8b5c79bb97 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -14,6 +14,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config::peer; use zenoh_core::ztimeout; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0af0b64164..b672227ab9 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -22,7 +22,7 @@ const SLEEP: Duration = Duration::from_secs(1); #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use zenoh::session::SessionDeclarations; + use zenoh::{sample::SampleKind, session::SessionDeclarations}; let mut c1 = config::peer(); c1.listen diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index f36775bca2..44354b5ddb 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; use 
zenoh_config as config; From 118b8c08ea74d44c4725d6eee550237fc9861446 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:39:55 +0200 Subject: [PATCH 156/598] priority commented in prelude --- examples/examples/z_pub_thr.rs | 2 +- zenoh-ext/src/group.rs | 1 + zenoh/src/prelude.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 75e2d72fbd..8450855956 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,7 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::payload::Payload; use zenoh::prelude::sync::*; -use zenoh::publication::CongestionControl; +use zenoh::publication::{CongestionControl, Priority}; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 5e9b9e66f3..60c55fdc2f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -30,6 +30,7 @@ use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; +use zenoh::publication::Priority; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; use zenoh::session::Session; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 0ca952bc16..242476898a 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -53,7 +53,7 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use crate::api::sample::SourceInfo; // pub use crate::api::sample::{Sample, SampleKind}; - pub use crate::api::publication::Priority; + // pub use crate::api::publication::Priority; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; From 0406c5f60bf2c9dc18898c935493532d693640e2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:43:41 +0200 Subject: [PATCH 157/598] 
congestion control commented in prelude --- zenoh/src/prelude.rs | 7 +++---- zenoh/tests/qos.rs | 1 + zenoh/tests/routing.rs | 1 + zenoh/tests/session.rs | 2 ++ zenoh/tests/unicity.rs | 2 ++ 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 242476898a..76e41fe579 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -54,10 +54,9 @@ pub(crate) mod common { // pub use crate::api::sample::SourceInfo; // pub use crate::api::sample::{Sample, SampleKind}; // pub use crate::api::publication::Priority; - #[zenoh_macros::unstable] - pub use crate::api::publication::PublisherDeclarations; - pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - + // #[zenoh_macros::unstable] + // pub use crate::api::publication::PublisherDeclarations; + // pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; pub use crate::api::builders::sample::{ QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index b9f3ab3945..e11fcf4e22 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,6 +13,7 @@ // use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index b0d789312a..98c2f002f7 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -18,6 +18,7 @@ use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use zenoh::Result; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 44354b5ddb..313b30e141 100644 --- 
a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,8 +16,10 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; +use zenoh::subscriber::Reliability; use zenoh::value::Value; use zenoh_config as config; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f37f6cb852..1c76b1f8fc 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +use config::WhatAmI; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; @@ -18,6 +19,7 @@ use tokio::runtime::Handle; use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; From 77b5759e0866ad301b338ff88bb187e905ddff31 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 15:14:54 +0200 Subject: [PATCH 158/598] builder traits removed from prelude --- examples/examples/z_get.rs | 1 + examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub_thr.rs | 1 + plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 2 ++ .../zenoh-plugin-storage-manager/src/replica/aligner.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/storage.rs | 4 ++-- zenoh-ext/src/group.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 3 +-- zenoh/src/api/query.rs | 9 ++++++--- zenoh/src/api/queryable.rs | 6 ++---- zenoh/src/api/session.rs | 5 +++-- zenoh/src/prelude.rs | 6 +++--- zenoh/tests/attachments.rs | 2 +- zenoh/tests/handler.rs | 2 +- zenoh/tests/session.rs | 2 +- zenoh/tests/unicity.rs | 1 + 19 files 
changed, 31 insertions(+), 21 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 6326ddf6c6..67a393c61f 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::query::QueryTarget; +use zenoh::sample::ValueBuilderTrait; use zenoh::selector::Selector; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index ef9bc08617..c9cdd0635b 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -18,6 +18,7 @@ use zenoh::key_expr::keyexpr; use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 7446456938..ea992ab5bc 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -16,6 +16,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 8450855956..d8b94c88b0 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -17,6 +17,7 @@ use std::convert::TryInto; use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::{CongestionControl, Priority}; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 28627999f3..c7d22988e9 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ 
b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -17,6 +17,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6465d74efd..ddd866efcb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -36,7 +36,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample::{Sample, SampleKind}; +use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index e33b0c519d..581e93cf6b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -22,6 +22,8 @@ use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::Sample; +use zenoh::sample::TimestampBuilderTrait; +use zenoh::sample::ValueBuilderTrait; use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 466b415d73..61d5d074e4 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use 
zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::{Sample, SampleBuilder}; +use zenoh::sample::{Sample, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 88b21f0a0c..c568835831 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -30,8 +30,8 @@ use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::SampleBuilder; -use zenoh::sample::{Sample, SampleKind}; +use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; +use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 60c55fdc2f..12e349f299 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -33,6 +33,7 @@ use zenoh::prelude::r#async::*; use zenoh::publication::Priority; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 34b0ee9bb5..d5ed3e9987 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -19,9 +19,8 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use 
zenoh::sample::{Locality, Sample, SampleBuilder}; +use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 567bfe2c64..8b0f0c9f6c 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -14,13 +14,13 @@ //! Query primitives. use super::{ - builders::sample::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}, + builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, encoding::Encoding, handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, payload::Payload, publication::Priority, - sample::{Locality, QoSBuilder, Sample, SourceInfo}, + sample::{Locality, QoSBuilder, Sample}, selector::Selector, session::Session, value::Value, @@ -32,7 +32,10 @@ use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; #[zenoh_macros::unstable] -use crate::api::sample::Attachment; +use super::{ + builders::sample::SampleBuilderTrait, + sample::{Attachment, SourceInfo}, +}; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index a60adfe74f..c966e02101 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -14,10 +14,7 @@ //! Queryable primitives. 
use super::{ - builders::sample::{ - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, - ValueBuilderTrait, - }, + builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, @@ -43,6 +40,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { super::{ + builders::sample::SampleBuilderTrait, query::ReplyKeyExpr, sample::{Attachment, SourceInfo}, }, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 880bf4405f..136dfda1bf 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -21,7 +21,7 @@ use super::{ info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, payload::Payload, - publication::{Priority, Publisher}, + publication::Priority, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, @@ -74,10 +74,11 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use zenoh_util::core::AsyncResolve; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use { super::{ liveliness::{Liveliness, LivelinessTokenState}, + publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, sample::{Attachment, SourceInfo}, }, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 76e41fe579..ef304b011b 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -57,9 +57,9 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use crate::api::publication::PublisherDeclarations; // pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::api::builders::sample::{ - QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, - }; + // pub use crate::api::builders::sample::{ + // QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + // }; 
#[zenoh_macros::unstable] pub use crate::api::builders::sample::SampleBuilderTrait; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 073d5537bd..e83e5700d8 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -64,7 +64,7 @@ fn pubsub() { fn queries() { use zenoh::{ prelude::sync::*, - sample::{Attachment, SampleBuilderTrait}, + sample::{Attachment, SampleBuilderTrait, ValueBuilderTrait}, session::SessionDeclarations, }; use zenoh_config::Config; diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index fdb8e225fa..a0c4129f3a 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,4 +1,4 @@ -use zenoh::session::SessionDeclarations; +use zenoh::{sample::ValueBuilderTrait, session::SessionDeclarations}; use zenoh_config::Config; // diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 313b30e141..0ecd529c33 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,7 +17,7 @@ use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample::SampleKind; +use zenoh::sample::{QoSBuilderTrait, SampleKind}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::subscriber::Reliability; use zenoh::value::Value; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 1c76b1f8fc..84e243e801 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -20,6 +20,7 @@ use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; From 59625a63dfccb6645d47e797563cf7e782fd61ca Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 15:17:39 +0200 Subject: [PATCH 159/598] samplebuilder removed from prelude --- examples/examples/z_pub.rs | 1 + 
zenoh/src/prelude.rs | 76 +++++++++++++++++++------------------- zenoh/tests/attachments.rs | 2 +- 3 files changed, 40 insertions(+), 39 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 10209b04e6..a09754914b 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::sample::SampleBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index ef304b011b..d482ffae75 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -22,56 +22,56 @@ //! use zenoh::prelude::r#async::*; //! ``` -pub use common::*; -pub(crate) mod common { - // pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; - // pub use zenoh_buffers::{ - // buffer::{Buffer, SplitBuffer}, - // reader::HasReader, - // writer::HasWriter, - // }; - // pub use zenoh_core::Resolve; +// pub use common::*; +// pub(crate) mod common { +// pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; +// pub use zenoh_buffers::{ +// buffer::{Buffer, SplitBuffer}, +// reader::HasReader, +// writer::HasWriter, +// }; +// pub use zenoh_core::Resolve; - // pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; - // #[zenoh_macros::unstable] - // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; +// pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; +// #[zenoh_macros::unstable] +// pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - // pub use crate::config::{self, Config}; - // pub use crate::handlers::IntoHandler; - // pub use crate::selector::{Parameter, Parameters, Selector}; - // pub use crate::session::{Session, SessionDeclarations}; +// pub use crate::config::{self, Config}; +// pub use crate::handlers::IntoHandler; +// pub use crate::selector::{Parameter, Parameters, Selector}; +// 
pub use crate::session::{Session, SessionDeclarations}; - // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; +// pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - // pub use crate::api::encoding::Encoding; - // pub use crate::api::value::Value; - /// The encoding of a zenoh `Value`. - // pub use crate::payload::{Deserialize, Payload, Serialize}; +// pub use crate::api::encoding::Encoding; +// pub use crate::api::value::Value; +/// The encoding of a zenoh `Value`. +// pub use crate::payload::{Deserialize, Payload, Serialize}; - // #[zenoh_macros::unstable] - // pub use crate::api::sample::Locality; - // #[zenoh_macros::unstable] - // pub use crate::api::sample::SourceInfo; - // pub use crate::api::sample::{Sample, SampleKind}; - // pub use crate::api::publication::Priority; - // #[zenoh_macros::unstable] - // pub use crate::api::publication::PublisherDeclarations; - // pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - // pub use crate::api::builders::sample::{ - // QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, - // }; +// #[zenoh_macros::unstable] +// pub use crate::api::sample::Locality; +// #[zenoh_macros::unstable] +// pub use crate::api::sample::SourceInfo; +// pub use crate::api::sample::{Sample, SampleKind}; +// pub use crate::api::publication::Priority; +// #[zenoh_macros::unstable] +// pub use crate::api::publication::PublisherDeclarations; +// pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; +// pub use crate::api::builders::sample::{ +// QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, +// }; - #[zenoh_macros::unstable] - pub use crate::api::builders::sample::SampleBuilderTrait; -} +// #[zenoh_macros::unstable] +// pub use crate::api::builders::sample::SampleBuilderTrait; +// } /// Prelude to import when using Zenoh's sync API. 
pub mod sync { - pub use super::common::*; + // pub use super::common::*; pub use zenoh_core::SyncResolve; } /// Prelude to import when using Zenoh's async API. pub mod r#async { - pub use super::common::*; + // pub use super::common::*; pub use zenoh_core::AsyncResolve; } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index e83e5700d8..99177b17de 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, session::SessionDeclarations}; + use zenoh::{prelude::sync::*, sample::SampleBuilderTrait, session::SessionDeclarations}; use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); From 687b9f07ce8e9711e8acaf4f12476e194a6e028e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 16:50:11 +0200 Subject: [PATCH 160/598] prelude disabled --- examples/examples/z_delete.rs | 2 +- examples/examples/z_forward.rs | 2 +- examples/examples/z_get.rs | 2 +- examples/examples/z_info.rs | 2 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- examples/examples/z_pull.rs | 4 ++-- examples/examples/z_put.rs | 2 +- examples/examples/z_put_float.rs | 2 +- examples/examples/z_queryable.rs | 2 +- examples/examples/z_scout.rs | 2 +- examples/examples/z_storage.rs | 2 +- examples/examples/z_sub.rs | 2 +- examples/examples/z_sub_thr.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/aligner.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/mod.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- 
plugins/zenoh-plugin-storage-manager/tests/operations.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 2 +- zenoh-ext/examples/z_member.rs | 2 +- zenoh-ext/examples/z_pub_cache.rs | 2 +- zenoh-ext/examples/z_query_sub.rs | 2 +- zenoh-ext/examples/z_view_size.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/api/publication.rs | 3 ++- zenoh/src/lib.rs | 7 ++++++- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/tests/attachments.rs | 5 +++-- zenoh/tests/connection_retry.rs | 3 +-- zenoh/tests/events.rs | 2 +- zenoh/tests/handler.rs | 6 ++++-- zenoh/tests/interceptors.rs | 7 +------ zenoh/tests/liveliness.rs | 2 +- zenoh/tests/matching.rs | 2 +- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 2 +- zenoh/tests/unicity.rs | 2 +- 45 files changed, 57 insertions(+), 54 deletions(-) diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 7f48f90c96..f441c1b68d 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index a4c3cb4ced..06d85b3931 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 67a393c61f..77304770a4 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -14,7 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use 
zenoh::core::AsyncResolve; use zenoh::query::QueryTarget; use zenoh::sample::ValueBuilderTrait; use zenoh::selector::Selector; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index c63e5974e9..bb81030b3a 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -14,7 +14,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::config::ZenohId; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index c9cdd0635b..08cd9e8817 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,9 +14,9 @@ use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; +use zenoh::core::SyncResolve; use zenoh::key_expr::keyexpr; use zenoh::payload::Payload; -use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index ea992ab5bc..c3225809fa 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::SyncResolve; use zenoh::key_expr::keyexpr; -use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index a09754914b..a0d8edadaf 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,8 +14,8 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::sample::SampleBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git 
a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index d8b94c88b0..fd50118022 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,8 +14,8 @@ use clap::Parser; use std::convert::TryInto; +use zenoh::core::SyncResolve; use zenoh::payload::Payload; -use zenoh::prelude::sync::*; use zenoh::publication::{CongestionControl, Priority}; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 6a07de8358..b405fd331b 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,9 +13,9 @@ // use clap::Parser; use std::time::Duration; +use zenoh::core::AsyncResolve; use zenoh::{ - config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*, - session::SessionDeclarations, + config::Config, handlers::RingBuffer, key_expr::KeyExpr, session::SessionDeclarations, }; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index b6039d09ba..a2c6ac2574 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 86f1ce3c08..5fce2a5935 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index ac58d9f094..49a5b946a7 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -13,8 +13,8 @@ // use clap::Parser; 
use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index a46b7c49fe..b0d34061d3 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::scouting::scout; use zenoh::scouting::WhatAmI; diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 07f54ce5ff..ed9a2b0b89 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -17,8 +17,8 @@ use clap::Parser; use futures::select; use std::collections::HashMap; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::prelude::r#async::*; use zenoh::sample::{Sample, SampleKind}; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index df77429356..ae1e7292e0 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index b4b6ecd0e5..d94ca4fa0f 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -14,7 +14,7 @@ use clap::Parser; use std::time::Instant; use zenoh::config::Config; -use zenoh::prelude::sync::*; +use zenoh::core::SyncResolve; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-plugin-example/src/lib.rs 
b/plugins/zenoh-plugin-example/src/lib.rs index 6e9d0a917d..f5565f841c 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,11 +24,11 @@ use std::sync::{ }; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; use zenoh::sample::Sample; use zenoh::session::SessionDeclarations; use zenoh_core::zlock; +use zenoh_core::AsyncResolve; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c7d22988e9..40b03c3a59 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -14,8 +14,8 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::keyexpr; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index ddd866efcb..44b1013936 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,11 +29,11 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::core::AsyncResolve; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::payload::{Payload, StringOrBase64}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs 
b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 12fbede21d..bea144e39a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -27,9 +27,9 @@ use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::core::SyncResolve; use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; use zenoh::selector::Selector; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 581e93cf6b..d7620aad13 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,9 +18,9 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; +use zenoh::core::AsyncResolve; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::sample::Sample; use zenoh::sample::TimestampBuilderTrait; use zenoh::sample::ValueBuilderTrait; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 61d5d074e4..7e604c1f01 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,9 +18,9 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::core::AsyncResolve; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::sample::{Sample, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::session::Session; use zenoh::time::Timestamp; diff 
--git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 7192e3ab7e..3aec2ec476 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,9 +26,9 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; +use zenoh::core::AsyncResolve; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::sample::Locality; use zenoh::session::Session; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c568835831..f0733c2371 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,12 +23,12 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; +use zenoh::core::AsyncResolve; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; -use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index bd612bec87..1def746449 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -21,8 +21,8 @@ use std::thread::sleep; use async_std::task; use zenoh::config::{Config, ValidatedMap}; +use 
zenoh::core::AsyncResolve; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 054be54ce8..bf1ecf707f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -22,8 +22,8 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; use zenoh::config::{Config, ValidatedMap}; +use zenoh::core::AsyncResolve; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; diff --git a/zenoh-ext/examples/z_member.rs b/zenoh-ext/examples/z_member.rs index fb10ac4cd8..217c0d90e3 100644 --- a/zenoh-ext/examples/z_member.rs +++ b/zenoh-ext/examples/z_member.rs @@ -15,7 +15,7 @@ use futures::StreamExt; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index e564ffb8f1..50b6d11c53 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -14,7 +14,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 61ea0eac92..c3fc363069 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -14,7 +14,7 @@ use clap::arg; use clap::Command; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::query::ReplyKeyExpr; use 
zenoh::session::SessionDeclarations; use zenoh_ext::*; diff --git a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 64e7b3ea4c..8496629646 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -15,7 +15,7 @@ use clap::{arg, Command}; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 12e349f299..d6175b502d 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,7 +29,6 @@ use zenoh::key_expr::keyexpr; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::PayloadReader; -use zenoh::prelude::r#async::*; use zenoh::publication::Priority; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -38,6 +37,7 @@ use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; use zenoh::Result as ZResult; +use zenoh_core::AsyncResolve; use zenoh_result::bail; use zenoh_sync::Condition; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 2c3d21eec4..ad8be76e65 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1148,7 +1148,8 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { - use crate::{api::session::open, prelude::sync::*}; + use crate::api::session::open; + use zenoh_core::SyncResolve; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c6e890cb27..9cbe4ca5e5 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -116,6 +116,11 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +pub mod core { + pub use zenoh_core::AsyncResolve; + pub use zenoh_core::SyncResolve; +} + /// A collection of useful buffers used by 
zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { @@ -277,4 +282,4 @@ pub mod shm { pub use zenoh_shm::SharedMemoryManager; } -pub mod prelude; +// pub mod prelude; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index be992621e6..968f6cc3de 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -21,7 +21,6 @@ use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; -use crate::prelude::sync::SyncResolve; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; @@ -31,6 +30,7 @@ use std::sync::Arc; use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_core::SyncResolve; use zenoh_plugin_trait::{PluginControl, PluginStatus}; use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 99177b17de..41c8d85dd7 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,8 +14,9 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, sample::SampleBuilderTrait, session::SessionDeclarations}; + use zenoh::{sample::SampleBuilderTrait, session::SessionDeclarations}; use zenoh_config::Config; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -63,11 +64,11 @@ fn pubsub() { #[test] fn queries() { use zenoh::{ - prelude::sync::*, sample::{Attachment, SampleBuilderTrait, ValueBuilderTrait}, session::SessionDeclarations, }; use zenoh_config::Config; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 675b4eb879..f510e4f54a 100644 --- 
a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,6 +1,5 @@ use zenoh_config::{Config, ConnectionRetryConf, EndPoint, ValidatedMap}; - -use zenoh::prelude::sync::*; +use zenoh_core::SyncResolve; #[test] fn retry_config_overriding() { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 8b5c79bb97..3069e53e24 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config::peer; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index a0c4129f3a..82030daef5 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -17,7 +17,8 @@ use zenoh_config::Config; #[test] fn pubsub_with_ringbuffer() { use std::{thread, time::Duration}; - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::handlers::RingBuffer; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let sub = zenoh @@ -49,7 +50,8 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::handlers::RingBuffer; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let queryable = zenoh diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1ff1f49651..c20dcafdb7 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -15,6 +15,7 @@ use std::sync::{Arc, Mutex}; use zenoh::session::SessionDeclarations; use zenoh_config::{Config, ValidatedMap}; use zenoh_core::zlock; +use zenoh_core::SyncResolve; struct IntervalCounter { first_tick: bool, @@ -63,8 +64,6 @@ impl IntervalCounter { fn downsampling_by_keyexpr_impl(egress: 
bool) { let _ = env_logger::builder().is_test(true).try_init(); - use zenoh::prelude::sync::*; - let ds_cfg = format!( r#" [ @@ -180,8 +179,6 @@ fn downsampling_by_keyexpr() { fn downsampling_by_interface_impl(egress: bool) { let _ = env_logger::builder().is_test(true).try_init(); - use zenoh::prelude::sync::*; - let ds_cfg = format!( r#" [ @@ -282,8 +279,6 @@ fn downsampling_by_interface() { fn downsampling_config_error_wrong_strategy() { let _ = env_logger::builder().is_test(true).try_init(); - use zenoh::prelude::sync::*; - let mut config = Config::default(); config .insert_json5( diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index b672227ab9..43dfd37281 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh_config as config; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index b22d8dd1c8..341f66bba7 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -14,11 +14,11 @@ use std::str::FromStr; use std::time::Duration; use zenoh::config::Locator; -use zenoh::prelude::r#async::*; use zenoh::session::Session; use zenoh_config as config; use zenoh_config::peer; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; use zenoh_result::ZResult as Result; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index e11fcf4e22..5e3f507006 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,11 +12,11 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; +use 
zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 98c2f002f7..c0879bdb7e 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -17,12 +17,12 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use zenoh::Result; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; use zenoh_result::bail; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 0ecd529c33..b325e7601b 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,7 +15,6 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::{QoSBuilderTrait, SampleKind}; use zenoh::session::{Session, SessionDeclarations}; @@ -23,6 +22,7 @@ use zenoh::subscriber::Reliability; use zenoh::value::Value; use zenoh_config as config; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 84e243e801..8ad80b7315 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -18,12 +18,12 @@ use std::time::Duration; use tokio::runtime::Handle; use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use 
zenoh_config as config; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From c24ea14833f24eb1a0c687dd77886f085728eab4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 17:38:00 +0200 Subject: [PATCH 161/598] zenoh-ext depends on zenoh only --- Cargo.lock | 6 ------ zenoh-ext/Cargo.toml | 14 +++++++------- zenoh-ext/src/group.rs | 6 +++--- zenoh-ext/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 14 ++++++++------ zenoh-ext/src/querying_subscriber.rs | 11 ++++++----- zenoh-ext/src/session_ext.rs | 9 +++++---- zenoh/src/lib.rs | 14 ++++++++++++++ 8 files changed, 44 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75a045d9b3..f2f2b0f5e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4623,12 +4623,6 @@ dependencies = [ "serde_json", "tokio", "zenoh", - "zenoh-core", - "zenoh-macros", - "zenoh-result", - "zenoh-runtime", - "zenoh-sync", - "zenoh-util", ] [[package]] diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 65f1d47af1..d86745b4fe 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -31,7 +31,13 @@ unstable = [] default = [] [dependencies] -tokio = { workspace = true, features = ["rt", "sync", "time", "macros", "io-std"] } +tokio = { workspace = true, features = [ + "rt", + "sync", + "time", + "macros", + "io-std", +] } bincode = { workspace = true } env_logger = { workspace = true } flume = { workspace = true } @@ -42,12 +48,6 @@ serde = { workspace = true, features = ["default"] } serde_cbor = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } -zenoh-core = { workspace = true } -zenoh-macros = { workspace = true } -zenoh-result = { workspace = true } -zenoh-sync = { workspace = true } -zenoh-util = { workspace = true } -zenoh-runtime = { workspace = true } [dev-dependencies] clap = { workspace = true, 
features = ["derive"] } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index d6175b502d..90fefae638 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,9 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use zenoh::core::AsyncResolve; +use zenoh::internal::bail; +use zenoh::internal::Condition; use zenoh::key_expr::keyexpr; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; @@ -37,9 +40,6 @@ use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; use zenoh::Result as ZResult; -use zenoh_core::AsyncResolve; -use zenoh_result::bail; -use zenoh_sync::Condition; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7ac880fd8c..a59e057371 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -23,9 +23,9 @@ pub use querying_subscriber::{ pub use session_ext::SessionExt; pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; +use zenoh::internal::zerror; use zenoh::query::Reply; use zenoh::{sample::Sample, Result as ZResult}; -use zenoh_core::zerror; /// The space of keys to use in a [`FetchingSubscriber`]. 
pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 25311ca647..ac37eaeafa 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -15,15 +15,17 @@ use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::internal::ResolveFuture; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::queryable::{Query, Queryable}; +use zenoh::runtime::ZRuntime; use zenoh::sample::{Locality, Sample}; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh_result::{bail, ZResult}; -use zenoh_util::core::ResolveFuture; +use zenoh::Error; +use zenoh::{internal::bail, Result as ZResult}; /// The builder of PublicationCache, allowing to configure it. #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -57,7 +59,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { pub fn queryable_prefix(mut self, queryable_prefix: TryIntoKeyExpr) -> Self where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { self.queryable_prefix = Some(queryable_prefix.try_into().map_err(Into::into)); self @@ -65,7 +67,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { /// Restrict the matching queries that will be receive by this [`PublicationCache`]'s queryable /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). 
- #[zenoh_macros::unstable] + #[zenoh::internal::unstable] #[inline] pub fn queryable_allowed_origin(mut self, origin: Locality) -> Self { self.queryable_origin = Some(origin); @@ -169,7 +171,7 @@ impl<'a> PublicationCache<'a> { // TODO(yuyuan): use CancellationToken to manage it let (stoptx, stoprx) = bounded::(1); - zenoh_runtime::ZRuntime::TX.spawn(async move { + ZRuntime::TX.spawn(async move { let mut cache: HashMap> = HashMap::with_capacity(resources_limit.unwrap_or(32)); let limit = resources_limit.unwrap_or(usize::MAX); diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d5ed3e9987..9502a3a7b2 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,7 +17,9 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; +use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; +use zenoh::internal::zlock; use zenoh::key_expr::KeyExpr; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; @@ -25,8 +27,7 @@ use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; -use zenoh::Result as ZResult; -use zenoh_core::{zlock, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::{Error, Result as ZResult}; use crate::ExtractSample; @@ -162,7 +163,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle /// Restrict the matching publications that will be receive by this [`Subscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). 
- #[zenoh_macros::unstable] + #[zenoh::internal::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; @@ -174,7 +175,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle pub fn query_selector(mut self, query_selector: IntoSelector) -> Self where IntoSelector: TryInto>, - >>::Error: Into, + >>::Error: Into, { self.query_selector = Some(query_selector.try_into().map_err(Into::into)); self @@ -522,7 +523,7 @@ where /// Restrict the matching publications that will be receive by this [`FetchingSubscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). - #[zenoh_macros::unstable] + #[zenoh::internal::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 2dd0fbd873..ab178ae70f 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use zenoh::{ key_expr::KeyExpr, session::{Session, SessionRef}, + Error, }; /// Some extensions to the [`zenoh::Session`](zenoh::Session) @@ -27,7 +28,7 @@ pub trait SessionExt<'s, 'a> { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into; + >>::Error: Into; } impl<'s, 'a> SessionExt<'s, 'a> for SessionRef<'a> { @@ -37,7 +38,7 @@ impl<'s, 'a> SessionExt<'s, 'a> for SessionRef<'a> { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { PublicationCacheBuilder::new(self.clone(), pub_key_expr.try_into().map_err(Into::into)) } @@ -50,7 +51,7 @@ impl<'a> SessionExt<'a, 'a> for Session { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { SessionRef::Borrow(self).declare_publication_cache(pub_key_expr) } @@ -80,7 +81,7 @@ impl<'s> SessionExt<'s, 'static> for Arc { ) -> PublicationCacheBuilder<'static, 
'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { SessionRef::Shared(self.clone()).declare_publication_cache(pub_key_expr) } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 9cbe4ca5e5..2e3b34bdf2 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -118,9 +118,21 @@ pub use crate::api::session::open; pub mod core { pub use zenoh_core::AsyncResolve; + pub use zenoh_core::Resolvable; + pub use zenoh_core::Resolve; pub use zenoh_core::SyncResolve; } +#[doc(hidden)] +pub mod internal { + pub use zenoh_core::zerror; + pub use zenoh_core::zlock; + pub use zenoh_macros::unstable; + pub use zenoh_result::bail; + pub use zenoh_sync::Condition; + pub use zenoh_util::core::ResolveFuture; +} + /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { @@ -259,8 +271,10 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } +#[doc(hidden)] pub mod runtime { pub use crate::net::runtime::{AdminSpace, Runtime}; + pub use zenoh_runtime::ZRuntime; } pub mod config { From 4d05c9f824e1fccb5cf63c1abe7916aba5f8c00a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 18:43:59 +0200 Subject: [PATCH 162/598] dependencis removed --- Cargo.lock | 3 --- plugins/zenoh-plugin-storage-manager/Cargo.toml | 3 --- plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 4 ++-- .../zenoh-plugin-storage-manager/src/memory_backend/mod.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs | 2 +- 7 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2f2b0f5e7..93eef060a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4955,10 +4955,7 @@ dependencies = [ "serde_json", "urlencoding", "zenoh", - "zenoh-collections", - "zenoh-core", "zenoh-plugin-trait", - 
"zenoh-result", "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index fe9359f696..2b2a6a3f2c 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -47,10 +47,7 @@ serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } -zenoh-collections = { workspace = true } -zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index dcce49f5da..3837d26dda 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -15,9 +15,9 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; use zenoh::session::Session; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; -use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index bea144e39a..e3d11be4c8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -28,24 +28,24 @@ use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; use zenoh::core::SyncResolve; +use zenoh::internal::zlock; use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; use zenoh::session::Session; +use zenoh::Result as ZResult; use 
zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::config::VolumeConfig; use zenoh_backend_traits::VolumeInstance; -use zenoh_core::zlock; use zenoh_plugin_trait::plugin_long_version; use zenoh_plugin_trait::plugin_version; use zenoh_plugin_trait::Plugin; use zenoh_plugin_trait::PluginControl; use zenoh_plugin_trait::PluginReport; use zenoh_plugin_trait::PluginStatusRec; -use zenoh_result::ZResult; use zenoh_util::LibLoader; mod backends_mgt; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 0924279cb2..cd491ba01c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -18,10 +18,10 @@ use std::sync::Arc; use zenoh::key_expr::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; use zenoh_backend_traits::*; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; -use zenoh_result::ZResult; use crate::MEMORY_BACKEND_NAME; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f0733c2371..c4d298ba83 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,6 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::core::AsyncResolve; +use zenoh::internal::bail; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; @@ -38,7 +39,6 @@ use zenoh::value::Value; use 
zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_result::bail; use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 8643429a65..8ee9eb7218 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -13,8 +13,8 @@ // use async_std::sync::Arc; use zenoh::session::Session; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; -use zenoh_result::ZResult; pub use super::replica::{Replica, StorageService}; From 9ab5fc94be36c3ff7d3cae649332005646bb3616 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 18:47:37 +0200 Subject: [PATCH 163/598] dependency removed --- Cargo.lock | 1 - plugins/zenoh-plugin-storage-manager/Cargo.toml | 1 - plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- zenoh/src/lib.rs | 2 ++ 5 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93eef060a7..ba7cb0efbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4956,7 +4956,6 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", - "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 2b2a6a3f2c..ca6e5cb0fa 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -48,7 +48,6 @@ serde_json = { workspace = true } urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-plugin-trait = { workspace = 
true } -zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } [build-dependencies] diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index e3d11be4c8..e920486de8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,6 +30,7 @@ use storages_mgt::StorageMessage; use zenoh::core::SyncResolve; use zenoh::internal::zlock; use zenoh::key_expr::keyexpr; +use zenoh::plugins::LibLoader; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; @@ -46,7 +47,6 @@ use zenoh_plugin_trait::Plugin; use zenoh_plugin_trait::PluginControl; use zenoh_plugin_trait::PluginReport; use zenoh_plugin_trait::PluginStatusRec; -use zenoh_util::LibLoader; mod backends_mgt; use backends_mgt::*; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c4d298ba83..f5524c8eb5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -25,6 +25,7 @@ use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::core::AsyncResolve; use zenoh::internal::bail; +use zenoh::internal::{zenoh_home, Timed, TimedEvent, Timer}; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; @@ -39,7 +40,6 @@ use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: 
&str = "tombstones"; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2e3b34bdf2..1c03506039 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -131,6 +131,7 @@ pub mod internal { pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_util::core::ResolveFuture; + pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -289,6 +290,7 @@ pub mod plugins { pub use crate::api::plugins::Response; pub use crate::api::plugins::RunningPlugin; pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; + pub use zenoh_util::LibLoader; } #[cfg(feature = "shared-memory")] From 964176f9c81ee22947b97523729f56caea40d4e1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 19:03:16 +0200 Subject: [PATCH 164/598] small move --- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- zenoh/src/lib.rs | 23 ++++++++++--------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index e920486de8..701a34f4d6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -29,8 +29,8 @@ use std::sync::Mutex; use storages_mgt::StorageMessage; use zenoh::core::SyncResolve; use zenoh::internal::zlock; +use zenoh::internal::LibLoader; use zenoh::key_expr::keyexpr; -use zenoh::plugins::LibLoader; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 1c03506039..d2aa2f07fe 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -123,17 +123,6 @@ pub mod core { pub use zenoh_core::SyncResolve; } -#[doc(hidden)] -pub mod internal { - pub use zenoh_core::zerror; - pub use zenoh_core::zlock; - pub use zenoh_macros::unstable; - pub use zenoh_result::bail; - 
pub use zenoh_sync::Condition; - pub use zenoh_util::core::ResolveFuture; - pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; -} - /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { @@ -285,12 +274,24 @@ pub mod config { }; } +#[doc(hidden)] pub mod plugins { pub use crate::api::plugins::PluginsManager; pub use crate::api::plugins::Response; pub use crate::api::plugins::RunningPlugin; pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; +} + +#[doc(hidden)] +pub mod internal { + pub use zenoh_core::zerror; + pub use zenoh_core::zlock; + pub use zenoh_macros::unstable; + pub use zenoh_result::bail; + pub use zenoh_sync::Condition; + pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; + pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; } #[cfg(feature = "shared-memory")] From 5abc46265eaa192a990b7178ed42443e17091351 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 20:30:34 +0200 Subject: [PATCH 165/598] docs corrected --- zenoh/src/api/handlers.rs | 1 - zenoh/src/api/key_expr.rs | 31 --------------------- zenoh/src/api/liveliness.rs | 3 -- zenoh/src/api/payload.rs | 1 - zenoh/src/api/publication.rs | 1 - zenoh/src/api/query.rs | 1 - zenoh/src/api/queryable.rs | 1 - zenoh/src/api/sample.rs | 1 - zenoh/src/api/selector.rs | 2 -- zenoh/src/api/subscriber.rs | 1 - zenoh/src/api/value.rs | 1 - zenoh/src/lib.rs | 54 +++++++++++++++++++++++++++++++++--- 12 files changed, 50 insertions(+), 48 deletions(-) diff --git a/zenoh/src/api/handlers.rs b/zenoh/src/api/handlers.rs index 7610fe43d8..f17bdafca3 100644 --- a/zenoh/src/api/handlers.rs +++ b/zenoh/src/api/handlers.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Callback handler trait. 
use super::session::API_DATA_RECEPTION_CHANNEL_SIZE; use std::sync::{Arc, Mutex, Weak}; use zenoh_collections::RingBuffer as RingBufferInner; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 0eb7515181..ff174186cb 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -12,37 +12,6 @@ // ZettaScale Zenoh Team, // -//! [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. -//! -//! In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). -//! KEs are a small language that express sets of keys through a glob-like language. -//! -//! These semantics can be a bit difficult to implement, so this module provides the following facilities: -//! -//! # Storing Key Expressions -//! This module provides 3 flavours to store strings that have been validated to respect the KE syntax: -//! - [`keyexpr`] is the equivalent of a [`str`], -//! - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], -//! - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize -//! routing and network usage. -//! -//! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, -//! or even if a [`keyexpr::includes`] another. -//! -//! # Tying values to Key Expressions -//! When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect -//! the Key Expression semantics with high performance. -//! -//! Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. -//! -//! # Building and parsing Key Expressions -//! 
A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. -//! The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, -//! both in constructing and in parsing KEs that fit the formats you've defined. -//! -//! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. - use super::{ selector::Selector, session::{Session, Undeclarable}, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index c2a075ee52..78f32efcba 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -12,9 +12,6 @@ // ZettaScale Zenoh Team, // -//! Liveliness primitives. -//! -//! see [`Liveliness`] use super::{ handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, diff --git a/zenoh/src/api/payload.rs b/zenoh/src/api/payload.rs index ed2a58145c..a571f8433c 100644 --- a/zenoh/src/api/payload.rs +++ b/zenoh/src/api/payload.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Payload primitives. use crate::buffers::ZBuf; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index ad8be76e65..8b029367de 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Publishing primitives. 
use super::{ builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 8b0f0c9f6c..b196f2bcaf 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Query primitives. use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, encoding::Encoding, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index c966e02101..3e5117356a 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Queryable primitives. use super::{ builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, encoding::Encoding, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index a51264e1a4..4321ac6d6c 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Sample primitives use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, encoding::Encoding, diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 144b4ee8a0..c966015721 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // -//! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries - use super::{key_expr::KeyExpr, queryable::Query}; use std::{ borrow::{Borrow, Cow}, diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 7ad0160ae3..8778d9bc38 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Subscribing primitives. 
use super::{ handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 60586ad040..3393b5477b 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Value primitives. use super::{builders::sample::ValueBuilderTrait, encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index d2aa2f07fe..35f8920798 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -130,6 +130,36 @@ pub mod buffers { pub use zenoh_buffers::{ZBuf, ZSlice}; } +/// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. +/// +/// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). +/// KEs are a small language that express sets of keys through a glob-like language. +/// +/// These semantics can be a bit difficult to implement, so this module provides the following facilities: +/// +/// # Storing Key Expressions +/// This module provides 3 flavours to store strings that have been validated to respect the KE syntax: +/// - [`keyexpr`] is the equivalent of a [`str`], +/// - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], +/// - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize +/// routing and network usage. +/// +/// All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, +/// or even if a [`keyexpr::includes`] another. 
+/// +/// # Tying values to Key Expressions +/// When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect +/// the Key Expression semantics with high performance. +/// +/// Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. +/// +/// # Building and parsing Key Expressions +/// A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. +/// The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, +/// both in constructing and in parsing KEs that fit the formats you've defined. +/// +/// [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. 
pub mod key_expr { pub mod keyexpr_tree { pub use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; @@ -151,6 +181,7 @@ pub mod key_expr { } } +/// Zenoh [`Session`](crate::session::Session) and associated types pub mod session { pub use crate::api::builders::publication::SessionDeleteBuilder; pub use crate::api::builders::publication::SessionPutBuilder; @@ -162,6 +193,7 @@ pub mod session { pub use crate::api::session::SessionRef; } +/// Sample primitives pub mod sample { pub use crate::api::builders::sample::QoSBuilderTrait; pub use crate::api::builders::sample::SampleBuilder; @@ -178,14 +210,17 @@ pub mod sample { pub use crate::api::sample::SourceInfo; } +/// Value primitives pub mod value { pub use crate::api::value::Value; } +/// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; } +/// Payload primitives pub mod payload { pub use crate::api::payload::Deserialize; pub use crate::api::payload::Payload; @@ -194,6 +229,7 @@ pub mod payload { pub use crate::api::payload::StringOrBase64; } +/// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { pub use crate::api::selector::Parameter; pub use crate::api::selector::Parameters; @@ -201,6 +237,7 @@ pub mod selector { pub use crate::api::selector::TIME_RANGE_KEY; } +/// Subscribing primitives pub mod subscriber { pub use crate::api::subscriber::FlumeSubscriber; pub use crate::api::subscriber::Subscriber; @@ -209,6 +246,7 @@ pub mod subscriber { pub use zenoh_protocol::core::Reliability; } +/// Publishing primitives pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; pub use crate::api::publication::Priority; @@ -218,6 +256,7 @@ pub mod publication { pub use zenoh_protocol::core::CongestionControl; } +/// Query primitives pub mod query { pub use crate::api::query::Mode; pub use crate::api::query::Reply; @@ -228,12 +267,14 @@ pub mod query { pub use crate::api::query::{ConsolidationMode, 
QueryConsolidation, QueryTarget}; } +/// Queryable primitives pub mod queryable { pub use crate::api::queryable::Query; pub use crate::api::queryable::Queryable; pub use crate::api::queryable::QueryableBuilder; } +/// Callback handler trait pub mod handlers { pub use crate::api::handlers::locked; pub use crate::api::handlers::DefaultHandler; @@ -241,6 +282,7 @@ pub mod handlers { pub use crate::api::handlers::RingBuffer; } +/// Scouting primitives pub mod scouting { pub use crate::api::scouting::scout; pub use crate::api::scouting::ScoutBuilder; @@ -250,12 +292,14 @@ pub mod scouting { pub use zenoh_protocol::scouting::Hello; } +/// Liveliness primitives #[cfg(feature = "unstable")] pub mod liveliness { pub use crate::api::liveliness::Liveliness; pub use crate::api::liveliness::LivelinessSubscriberBuilder; } +/// Timestamp support pub mod time { pub use crate::api::time::new_reception_timestamp; pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; @@ -267,11 +311,13 @@ pub mod runtime { pub use zenoh_runtime::ZRuntime; } +/// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants pub mod config { - pub use zenoh_config::{ - client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, - PluginLoad, ValidatedMap, ZenohId, - }; + // pub use zenoh_config::{ + // client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, + // PluginLoad, ValidatedMap, ZenohId, + // }; + pub use zenoh_config::*; } #[doc(hidden)] From cbc8d7d32fa2ac51eef3aa427551a12786dc071d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 21:00:03 +0200 Subject: [PATCH 166/598] clippy fixes --- .../src/replica/digest.rs | 14 +++++++------- .../tests/operations.rs | 2 +- .../zenoh-plugin-storage-manager/tests/wildcard.rs | 2 +- zenoh/src/api/selector.rs | 2 +- zenoh/src/lib.rs | 6 ++++-- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git 
a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index 98faa24aa2..ef09481880 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -770,7 +770,7 @@ impl Digest { #[test] fn test_create_digest_empty_initial() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -802,7 +802,7 @@ fn test_create_digest_empty_initial() { #[test] fn test_create_digest_with_initial_hot() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -858,7 +858,7 @@ fn test_create_digest_with_initial_hot() { #[test] fn test_create_digest_with_initial_warm() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -914,7 +914,7 @@ fn test_create_digest_with_initial_warm() { #[test] fn test_create_digest_with_initial_cold() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -970,7 +970,7 @@ fn test_create_digest_with_initial_cold() { #[test] fn test_update_digest_add_content() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = async_std::task::block_on(Digest::update_digest( Digest { @@ -1034,7 +1034,7 @@ fn test_update_digest_add_content() { #[test] fn 
test_update_digest_remove_content() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = async_std::task::block_on(Digest::update_digest( Digest { @@ -1098,7 +1098,7 @@ fn test_update_digest_remove_content() { #[test] fn test_update_remove_digest() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 1def746449..77e62b2f0d 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -22,12 +22,12 @@ use std::thread::sleep; use async_std::task; use zenoh::config::{Config, ValidatedMap}; use zenoh::core::AsyncResolve; +use zenoh::internal::zasync_executor_init; use zenoh::payload::StringOrBase64; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; -use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index bf1ecf707f..71decb8fee 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -23,12 +23,12 @@ use std::thread::sleep; use async_std::task; use zenoh::config::{Config, ValidatedMap}; use zenoh::core::AsyncResolve; +use zenoh::internal::zasync_executor_init; use zenoh::payload::StringOrBase64; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; -use zenoh_core::zasync_executor_init; use 
zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index c966015721..04d641725e 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -22,7 +22,7 @@ use std::{ }; use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; +use zenoh_util::time_range::TimeRange; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 35f8920798..a027a9e9ff 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -201,13 +201,13 @@ pub mod sample { pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] - pub use crate::api::sample::Attachment; - #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; + #[zenoh_macros::unstable] + pub use crate::api::sample::{Attachment, AttachmentBuilder, AttachmentIterator}; } /// Value primitives @@ -235,6 +235,7 @@ pub mod selector { pub use crate::api::selector::Parameters; pub use crate::api::selector::Selector; pub use crate::api::selector::TIME_RANGE_KEY; + pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; } /// Subscribing primitives @@ -330,6 +331,7 @@ pub mod plugins { #[doc(hidden)] pub mod internal { + pub use zenoh_core::zasync_executor_init; pub use zenoh_core::zerror; pub use zenoh_core::zlock; pub use zenoh_macros::unstable; From 5fb2531c35cf84d39241c7e3407a40982eccf006 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 21:12:21 +0200 Subject: [PATCH 
167/598] result in zenoh::core --- commons/zenoh-macros/src/lib.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../src/backends_mgt.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/memory_backend/mod.rs | 2 +- .../src/replica/storage.rs | 2 +- .../src/storages_mgt.rs | 2 +- zenoh-ext/src/group.rs | 4 ++-- zenoh-ext/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 4 ++-- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 2 +- zenoh/src/lib.rs | 14 +++++++------- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 17 files changed, 25 insertions(+), 25 deletions(-) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index b77dffeba0..655747cd86 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -152,7 +152,7 @@ fn keformat_support(source: &str) -> proc_macro2::TokenStream { let formatter_doc = format!("And instance of a formatter for `{source}`."); quote! 
{ - use ::zenoh::Result as ZResult; + use ::zenoh::core::Result as ZResult; const FORMAT_INNER: ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> = unsafe { ::zenoh::key_expr::format::macro_support::const_new(#source, [#(#segments)*]) }; diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index cfbc1566c8..096255fb59 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -17,7 +17,7 @@ use schemars::JsonSchema; use serde_json::{Map, Value}; use std::convert::TryFrom; use std::time::Duration; -use zenoh::{key_expr::keyexpr, key_expr::OwnedKeyExpr, Result as ZResult}; +use zenoh::{core::Result as ZResult, key_expr::keyexpr, key_expr::OwnedKeyExpr}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 4340c454fa..a8910d784b 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -125,10 +125,10 @@ use async_trait::async_trait; use const_format::concatcp; +use zenoh::core::Result as ZResult; use zenoh::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh::time::Timestamp; use zenoh::value::Value; -use zenoh::Result as ZResult; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 3837d26dda..cae784cb16 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,8 +14,8 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; +use zenoh::core::Result as ZResult; use zenoh::session::Session; -use zenoh::Result as ZResult; use 
zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 701a34f4d6..a9d610bb8e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -27,6 +27,7 @@ use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::core::Result as ZResult; use zenoh::core::SyncResolve; use zenoh::internal::zlock; use zenoh::internal::LibLoader; @@ -35,7 +36,6 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; use zenoh::session::Session; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; use zenoh_backend_traits::config::StorageConfig; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index cd491ba01c..162ef8d6d0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,10 +15,10 @@ use async_std::sync::RwLock; use async_trait::async_trait; use std::collections::HashMap; use std::sync::Arc; +use zenoh::core::Result as ZResult; use zenoh::key_expr::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; use zenoh_backend_traits::*; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f5524c8eb5..8fbf84d0fb 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -37,7 +37,7 @@ use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; -use zenoh::{session::Session, Result as ZResult}; +use zenoh::{core::Result as ZResult, session::Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 8ee9eb7218..15ef063656 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; +use zenoh::core::Result as ZResult; use zenoh::session::Session; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; pub use super::replica::{Replica, StorageService}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 90fefae638..32eefaf685 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -26,6 +26,8 @@ use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; use zenoh::core::AsyncResolve; +use zenoh::core::Error as ZError; +use zenoh::core::Result as ZResult; use zenoh::internal::bail; use zenoh::internal::Condition; use zenoh::key_expr::keyexpr; @@ -38,8 +40,6 @@ use zenoh::query::ConsolidationMode; use zenoh::sample::QoSBuilderTrait; use zenoh::session::Session; use zenoh::session::SessionDeclarations; -use zenoh::Error as ZError; -use zenoh::Result as ZResult; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index a59e057371..ea4dbbd6d4 100644 --- a/zenoh-ext/src/lib.rs +++ 
b/zenoh-ext/src/lib.rs @@ -25,7 +25,7 @@ pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; use zenoh::internal::zerror; use zenoh::query::Reply; -use zenoh::{sample::Sample, Result as ZResult}; +use zenoh::{core::Result as ZResult, sample::Sample}; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index ac37eaeafa..3ae9ec6b5a 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -15,6 +15,7 @@ use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use zenoh::core::Error; use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh::internal::ResolveFuture; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; @@ -24,8 +25,7 @@ use zenoh::sample::{Locality, Sample}; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; -use zenoh::Error; -use zenoh::{internal::bail, Result as ZResult}; +use zenoh::{core::Result as ZResult, internal::bail}; /// The builder of PublicationCache, allowing to configure it. 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 9502a3a7b2..6d0baf5d25 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -27,7 +27,7 @@ use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; -use zenoh::{Error, Result as ZResult}; +use zenoh::{core::Error, core::Result as ZResult}; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index ab178ae70f..3f23239b29 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -15,9 +15,9 @@ use super::PublicationCacheBuilder; use std::convert::TryInto; use std::sync::Arc; use zenoh::{ + core::Error, key_expr::KeyExpr, session::{Session, SessionRef}, - Error, }; /// Some extensions to the [`zenoh::Session`](zenoh::Session) diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index c758f910c2..c9004bc99b 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -14,9 +14,9 @@ use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; use std::time::Duration; +use zenoh::core::Result as ZResult; use zenoh::query::ReplyKeyExpr; use zenoh::sample::Locality; -use zenoh::Result as ZResult; use zenoh::{ liveliness::LivelinessSubscriberBuilder, query::{QueryConsolidation, QueryTarget}, diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a027a9e9ff..17e95fb3fc 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -85,11 +85,6 @@ mod net; use git_version::git_version; use zenoh_util::concat_enabled_features; -/// A zenoh error. -pub use zenoh_result::Error; -/// A zenoh result. 
-pub use zenoh_result::ZResult as Result; - const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); pub const FEATURES: &str = concat_enabled_features!( @@ -116,11 +111,18 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +// pub mod prelude; + +/// Zenoh core types pub mod core { pub use zenoh_core::AsyncResolve; pub use zenoh_core::Resolvable; pub use zenoh_core::Resolve; pub use zenoh_core::SyncResolve; + /// A zenoh error. + pub use zenoh_result::Error; + /// A zenoh result. + pub use zenoh_result::ZResult as Result; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -346,5 +348,3 @@ pub mod internal { pub mod shm { pub use zenoh_shm::SharedMemoryManager; } - -// pub mod prelude; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index c0879bdb7e..f79da05483 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -17,10 +17,10 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; +use zenoh::core::Result; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; -use zenoh::Result; use zenoh_core::ztimeout; use zenoh_core::AsyncResolve; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 781fc308df..ac4dc87f45 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -17,10 +17,10 @@ use git_version::git_version; use std::collections::HashSet; use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; +use zenoh::core::Result; use zenoh::plugins::PluginsManager; use zenoh::runtime::{AdminSpace, Runtime}; use zenoh::scouting::WhatAmI; -use zenoh::Result; const GIT_VERSION: &str = git_version!(prefix = "v", 
cargo_prefix = "v"); From 61a4b61a79d89b736c192f72b10a046cc03f4a03 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 21:54:17 +0200 Subject: [PATCH 168/598] prelude --- commons/zenoh-config/src/lib.rs | 2 +- commons/zenoh-config/src/mode_dependent.rs | 2 +- zenoh/src/lib.rs | 11 +-- zenoh/src/prelude.rs | 92 +++++++++++----------- 4 files changed, 51 insertions(+), 56 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 2b5485fa6b..4843d575b0 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 91e366f452..9f6cc2c7e4 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -19,7 +19,7 @@ use serde::{ use std::fmt; use std::marker::PhantomData; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; pub trait ModeDependent { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 17e95fb3fc..b3c88a109b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -82,12 +82,8 @@ extern crate zenoh_result; mod api; mod net; -use git_version::git_version; -use zenoh_util::concat_enabled_features; - -const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); - -pub const FEATURES: &str = concat_enabled_features!( +const GIT_VERSION: &str = 
git_version::git_version!(prefix = "v", cargo_prefix = "v"); +pub const FEATURES: &str = zenoh_util::concat_enabled_features!( prefix = "zenoh", features = [ "auth_pubkey", @@ -111,7 +107,7 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; -// pub mod prelude; +pub mod prelude; /// Zenoh core types pub mod core { @@ -188,6 +184,7 @@ pub mod session { pub use crate::api::builders::publication::SessionDeleteBuilder; pub use crate::api::builders::publication::SessionPutBuilder; #[zenoh_macros::unstable] + #[doc(hidden)] pub use crate::api::session::init; pub use crate::api::session::open; pub use crate::api::session::Session; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d482ffae75..d9230aa3fe 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -16,62 +16,60 @@ //! //! This prelude is similar to the standard library's prelude in that you'll //! almost always want to import its entire contents, but unlike the standard -//! library's prelude you'll have to do so manually. An example of using this is: +//! library's prelude you'll have to do so manually. +//! +//! There are three variants of the prelude: full, sync and async. The sync one excludes the [`AsyncResolve`](crate::core::AsyncResolve) trait and the async one excludes the [`SyncResolve`](crate::core::SyncResolve) trait. +//! When specific sync or async prelude is included, the `res()` function of buildes works synchronously or asynchronously, respectively. +//! +//! If root prelude is included, the `res_sync()` or `res_async()` function of builders should be called explicitly. +//! +//! Examples: //! //! ``` -//! use zenoh::prelude::r#async::*; +//!`use zenoh::prelude::*; +//! ``` +//! ``` +//!`use zenoh::prelude::sync::*; +//! ``` +//! ``` +//!`use zenoh::prelude::r#async::*; //! 
``` -// pub use common::*; -// pub(crate) mod common { -// pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; -// pub use zenoh_buffers::{ -// buffer::{Buffer, SplitBuffer}, -// reader::HasReader, -// writer::HasWriter, -// }; -// pub use zenoh_core::Resolve; - -// pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; -// #[zenoh_macros::unstable] -// pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - -// pub use crate::config::{self, Config}; -// pub use crate::handlers::IntoHandler; -// pub use crate::selector::{Parameter, Parameters, Selector}; -// pub use crate::session::{Session, SessionDeclarations}; - -// pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - -// pub use crate::api::encoding::Encoding; -// pub use crate::api::value::Value; -/// The encoding of a zenoh `Value`. -// pub use crate::payload::{Deserialize, Payload, Serialize}; - -// #[zenoh_macros::unstable] -// pub use crate::api::sample::Locality; -// #[zenoh_macros::unstable] -// pub use crate::api::sample::SourceInfo; -// pub use crate::api::sample::{Sample, SampleKind}; -// pub use crate::api::publication::Priority; -// #[zenoh_macros::unstable] -// pub use crate::api::publication::PublisherDeclarations; -// pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; -// pub use crate::api::builders::sample::{ -// QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -// }; +// All API types and traits in flat namespace +pub(crate) mod flat { + pub use crate::buffers::*; + pub use crate::config::*; + pub use crate::core::{AsyncResolve, Error, Resolvable, Resolve, Result, SyncResolve}; + pub use crate::encoding::*; + pub use crate::handlers::*; + pub use crate::key_expr::*; + pub use crate::payload::*; + pub use crate::plugins::*; + pub use crate::publication::*; + pub use crate::query::*; + pub use crate::queryable::*; + pub use crate::sample::*; + pub use crate::scouting::*; + pub use crate::selector::*; + pub use 
crate::session::*; + #[cfg(feature = "shared-memory")] + pub use crate::shm::*; + pub use crate::subscriber::*; + pub use crate::time::*; + pub use crate::value::*; +} -// #[zenoh_macros::unstable] -// pub use crate::api::builders::sample::SampleBuilderTrait; -// } +pub use crate::core::AsyncResolve; +pub use crate::core::SyncResolve; +pub use flat::*; /// Prelude to import when using Zenoh's sync API. pub mod sync { - // pub use super::common::*; - pub use zenoh_core::SyncResolve; + pub use super::flat::*; + pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. pub mod r#async { - // pub use super::common::*; - pub use zenoh_core::AsyncResolve; + pub use super::flat::*; + pub use crate::core::AsyncResolve; } From 5b0d82d560f5c9814c4f18a17e7bc8619b6b3220 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 22:05:28 +0200 Subject: [PATCH 169/598] examples updated --- examples/examples/z_delete.rs | 4 +--- examples/examples/z_formats.rs | 4 +--- examples/examples/z_forward.rs | 5 +---- examples/examples/z_get.rs | 6 +----- examples/examples/z_get_liveliness.rs | 2 -- examples/examples/z_info.rs | 5 +---- examples/examples/z_liveliness.rs | 2 -- examples/examples/z_ping.rs | 8 +------- examples/examples/z_pong.rs | 7 +------ examples/examples/z_pub.rs | 6 +----- examples/examples/z_pub_shm.rs | 3 --- examples/examples/z_pub_shm_thr.rs | 3 --- examples/examples/z_pub_thr.rs | 6 +----- examples/examples/z_pull.rs | 5 +---- examples/examples/z_put.rs | 4 +--- examples/examples/z_put_float.rs | 4 +--- examples/examples/z_queryable.rs | 5 +---- examples/examples/z_scout.rs | 5 +---- examples/examples/z_storage.rs | 6 +----- examples/examples/z_sub.rs | 5 +---- examples/examples/z_sub_liveliness.rs | 2 -- examples/examples/z_sub_thr.rs | 4 +--- zenoh-ext/examples/z_member.rs | 3 +-- zenoh-ext/examples/z_pub_cache.rs | 3 +-- zenoh-ext/examples/z_query_sub.rs | 5 +---- zenoh-ext/examples/z_view_size.rs | 3 +-- 
zenoh/src/prelude.rs | 2 +- 27 files changed, 22 insertions(+), 95 deletions(-) diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index f441c1b68d..6823083c51 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index eab5aa035a..7d26eb7775 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // -use zenoh::key_expr::kedefine; -use zenoh::key_expr::keformat; -use zenoh::key_expr::keyexpr; +use zenoh::prelude::*; kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 06d85b3931..feb00d5ea4 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 77304770a4..9cdc963c0c 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -13,11 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::query::QueryTarget; -use zenoh::sample::ValueBuilderTrait; -use zenoh::selector::Selector; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 
5e6fd06c84..ec53c8ad8e 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -13,8 +13,6 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index bb81030b3a..a42f848e69 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::config::ZenohId; -use zenoh::core::AsyncResolve; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 2a93f50db8..c8ee8af29d 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 08cd9e8817..81c4d7141e 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -13,13 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::config::Config; -use zenoh::core::SyncResolve; -use zenoh::key_expr::keyexpr; -use zenoh::payload::Payload; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index c3225809fa..b4ca01ef9b 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,12 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use 
zenoh::core::SyncResolve; -use zenoh::key_expr::keyexpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index a0d8edadaf..8b88b490a8 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -13,11 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::sample::SampleBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index bc239ebf41..52dc52cce0 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -13,10 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; -use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; const N: usize = 10; diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index c8a33f98fa..f488a23a8e 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::publication::CongestionControl; -use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index fd50118022..8d6266469e 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,11 +14,7 @@ use clap::Parser; use std::convert::TryInto; -use zenoh::core::SyncResolve; -use zenoh::payload::Payload; -use zenoh::publication::{CongestionControl, 
Priority}; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index b405fd331b..7c6e83e792 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,10 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::core::AsyncResolve; -use zenoh::{ - config::Config, handlers::RingBuffer, key_expr::KeyExpr, session::SessionDeclarations, -}; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index a2c6ac2574..af52b7d57a 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 5fce2a5935..fb35bffc27 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 49a5b946a7..511525dffd 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_scout.rs 
b/examples/examples/z_scout.rs index b0d34061d3..b5d02f6e40 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -11,10 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::scouting::scout; -use zenoh::scouting::WhatAmI; +use zenoh::prelude::r#async::*; #[tokio::main] async fn main() { diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index ed9a2b0b89..b618c859a2 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -16,11 +16,7 @@ use clap::Parser; use futures::select; use std::collections::HashMap; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::sample::{Sample, SampleKind}; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index ae1e7292e0..9c09f96620 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 690299dbeb..150f3e9f99 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index d94ca4fa0f..11eac07c26 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -13,9 +13,7 @@ // use 
clap::Parser; use std::time::Instant; -use zenoh::config::Config; -use zenoh::core::SyncResolve; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; struct Stats { diff --git a/zenoh-ext/examples/z_member.rs b/zenoh-ext/examples/z_member.rs index 217c0d90e3..411f773edd 100644 --- a/zenoh-ext/examples/z_member.rs +++ b/zenoh-ext/examples/z_member.rs @@ -14,8 +14,7 @@ use futures::StreamExt; use std::sync::Arc; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; +use zenoh::prelude::r#async::*; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index 50b6d11c53..ab80994e20 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -13,8 +13,7 @@ // use clap::{arg, Command}; use std::time::Duration; -use zenoh::config::{Config, ModeDependentValue}; -use zenoh::core::AsyncResolve; +use zenoh::prelude::r#async::*; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index c3fc363069..e4e471a5d5 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -13,10 +13,7 @@ // use clap::arg; use clap::Command; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::query::ReplyKeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 8496629646..8e5b615531 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -14,8 +14,7 @@ use clap::{arg, Command}; use std::sync::Arc; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; +use zenoh::prelude::r#async::*; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 
d9230aa3fe..87f67fe39c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -39,7 +39,7 @@ pub(crate) mod flat { pub use crate::buffers::*; pub use crate::config::*; - pub use crate::core::{AsyncResolve, Error, Resolvable, Resolve, Result, SyncResolve}; + pub use crate::core::{Error, Resolvable, Resolve, Result}; pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; From fa9b64beb89a8d44e78fe62a34af8b6fd43d9cba Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 22:07:47 +0200 Subject: [PATCH 170/598] format sample corrected --- examples/examples/z_formats.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 7d26eb7775..f2698b296b 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // -use zenoh::prelude::*; +use zenoh::prelude as zenoh; -kedefine!( +zenoh::kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" ); @@ -23,7 +23,7 @@ fn main() { // Formatting let mut formatter = file_format::formatter(); let file = "hi/there"; - let ke = keformat!(formatter, user_id = 42, file).unwrap(); + let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); From ae723bc9d0c15739b043fdda5202219def82e05c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 22:18:43 +0200 Subject: [PATCH 171/598] zenoh namespace fix --- examples/examples/z_formats.rs | 6 +++--- examples/examples/z_pub_shm.rs | 2 +- zenoh/src/prelude.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index f2698b296b..aeadc8d55d 100644 --- a/examples/examples/z_formats.rs +++ 
b/examples/examples/z_formats.rs @@ -26,8 +26,8 @@ fn main() { let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing - let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); + let settings_ke = zenoh::keyexpr::new("user_id/30/settings/dark_mode").unwrap(); let parsed = settings_format::parse(settings_ke).unwrap(); - assert_eq!(parsed.user_id(), keyexpr::new("30").unwrap()); - assert_eq!(parsed.setting(), keyexpr::new("dark_mode").ok()); + assert_eq!(parsed.user_id(), zenoh::keyexpr::new("30").unwrap()); + assert_eq!(parsed.setting(), zenoh::keyexpr::new("dark_mode").ok()); } diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 52dc52cce0..3d21d386b0 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -20,7 +20,7 @@ const N: usize = 10; const K: u32 = 3; #[tokio::main] -async fn main() -> Result<(), zenoh::Error> { +async fn main() -> Result<(), ZError> { // Initiate logging env_logger::init(); diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 87f67fe39c..1fc73d31b3 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -39,7 +39,7 @@ pub(crate) mod flat { pub use crate::buffers::*; pub use crate::config::*; - pub use crate::core::{Error, Resolvable, Resolve, Result}; + pub use crate::core::{Error as ZError, Resolvable, Resolve, Result as ZResult}; pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; From b089e6c5261b77c2a55dd59e3364deb0e5be6431 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 12:45:55 +0200 Subject: [PATCH 172/598] fix for doc test - reexport in prelude both flat and mods --- plugins/zenoh-backend-traits/src/lib.rs | 2 -- zenoh/src/api/builders/publication.rs | 4 --- zenoh/src/api/session.rs | 2 -- zenoh/src/lib.rs | 2 ++ zenoh/src/prelude.rs | 35 ++++++++++++++++++++++--- 5 files changed, 33 insertions(+), 12 deletions(-) diff --git 
a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index a8910d784b..5db79b57bd 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -30,10 +30,8 @@ //! use std::sync::Arc; //! use async_trait::async_trait; //! use zenoh::prelude::r#async::*; -//! use zenoh::time::Timestamp; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; -//! use zenoh::Result as ZResult; //! //! #[no_mangle] //! pub fn create_volume(config: VolumeConfig) -> ZResult> { diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index eb60021dbd..b6ebb0bad2 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -57,8 +57,6 @@ pub struct PublicationBuilderDelete; /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session @@ -236,8 +234,6 @@ impl AsyncResolve for PublicationBuilder, PublicationBu /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 136dfda1bf..8d0ae2ff66 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -480,7 +480,6 @@ impl Session { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::Session; /// /// let session = Session::leak(zenoh::open(config::peer()).res().await.unwrap()); /// let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); @@ -687,7 +686,6 @@ impl Session { /// # #[tokio::main] /// # 
async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::prelude::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index b3c88a109b..016888a8fd 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -105,7 +105,9 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); +// Reexport some functions directly to root `zenoh::`` namespace for convenience pub use crate::api::session::open; +pub use crate::scouting::scout; pub mod prelude; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 1fc73d31b3..d204aeabdc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -26,16 +26,16 @@ //! Examples: //! //! ``` -//!`use zenoh::prelude::*; +//!use zenoh::prelude::*; //! ``` //! ``` -//!`use zenoh::prelude::sync::*; +//!use zenoh::prelude::sync::*; //! ``` //! ``` -//!`use zenoh::prelude::r#async::*; +//!use zenoh::prelude::r#async::*; //! ``` -// All API types and traits in flat namespace +// Reexport API in flat namespace pub(crate) mod flat { pub use crate::buffers::*; pub use crate::config::*; @@ -59,17 +59,44 @@ pub(crate) mod flat { pub use crate::value::*; } +// Reexport API in hierarchical namespace +pub(crate) mod mods { + pub use crate::buffers; + pub use crate::config; + pub use crate::core; + pub use crate::encoding; + pub use crate::handlers; + pub use crate::key_expr; + pub use crate::payload; + pub use crate::plugins; + pub use crate::publication; + pub use crate::query; + pub use crate::queryable; + pub use crate::sample; + pub use crate::scouting; + pub use crate::selector; + pub use crate::session; + #[cfg(feature = "shared-memory")] + pub use crate::shm; + pub use crate::subscriber; + pub use crate::time; + pub use crate::value; +} + pub use crate::core::AsyncResolve; pub use crate::core::SyncResolve; pub use flat::*; +pub use mods::*; /// Prelude to import when using Zenoh's sync API. 
pub mod sync { pub use super::flat::*; + pub use super::mods::*; pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. pub mod r#async { pub use super::flat::*; + pub use super::mods::*; pub use crate::core::AsyncResolve; } From 2e6a7b2ddce97981a6b3c849783596804b94f91a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 12:55:58 +0200 Subject: [PATCH 173/598] unnecessary inports removed from doc --- zenoh/src/api/liveliness.rs | 1 - zenoh/src/api/query.rs | 1 - zenoh/src/api/queryable.rs | 1 - zenoh/src/api/scouting.rs | 9 --------- 4 files changed, 12 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 78f32efcba..70b09ad738 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -584,7 +584,6 @@ where /// # async fn main() { /// # use std::convert::TryFrom; /// use zenoh::prelude::r#async::*; -/// use zenoh::query::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let tokens = session diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index b196f2bcaf..3c15b18054 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -110,7 +110,6 @@ pub(crate) struct QueryState { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::query::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let replies = session diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3e5117356a..8cd6292e3d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -647,7 +647,6 @@ impl Drop for CallbackQueryable<'_> { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::queryable; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); diff --git a/zenoh/src/api/scouting.rs 
b/zenoh/src/api/scouting.rs index c15e9955a3..a769c34e8c 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -27,7 +27,6 @@ use zenoh_result::ZResult; /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .res() @@ -54,7 +53,6 @@ impl ScoutBuilder { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) @@ -90,7 +88,6 @@ impl ScoutBuilder { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let mut n = 0; /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) @@ -118,7 +115,6 @@ impl ScoutBuilder { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) @@ -186,7 +182,6 @@ where /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) @@ -208,7 +203,6 @@ impl ScoutInner { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) @@ -237,7 +231,6 @@ impl fmt::Debug for ScoutInner { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | 
WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) @@ -272,7 +265,6 @@ impl Scout { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) @@ -347,7 +339,6 @@ fn _scout( /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .res() From ba41a34230792ca49eba148cdb21a5b9d117c68b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 16:42:37 +0200 Subject: [PATCH 174/598] shortened import statements in tests --- zenoh/src/lib.rs | 1 + zenoh/tests/attachments.rs | 13 ++----------- zenoh/tests/connection_retry.rs | 16 ++++++++++++++-- zenoh/tests/events.rs | 8 ++------ zenoh/tests/handler.rs | 13 +++---------- zenoh/tests/interceptors.rs | 6 ++---- zenoh/tests/liveliness.rs | 12 +++--------- zenoh/tests/matching.rs | 30 ++++++++++-------------------- zenoh/tests/qos.rs | 7 ++----- zenoh/tests/routing.rs | 10 ++-------- zenoh/tests/session.rs | 11 ++--------- zenoh/tests/unicity.rs | 11 ++--------- 12 files changed, 45 insertions(+), 93 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 016888a8fd..6c139e4b24 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -335,6 +335,7 @@ pub mod internal { pub use zenoh_core::zasync_executor_init; pub use zenoh_core::zerror; pub use zenoh_core::zlock; + pub use zenoh_core::ztimeout; pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 41c8d85dd7..ef8b70f772 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,10 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{sample::SampleBuilderTrait, 
session::SessionDeclarations}; - use zenoh_config::Config; - use zenoh_core::SyncResolve; - + use zenoh::prelude::sync::*; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") @@ -63,13 +60,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{ - sample::{Attachment, SampleBuilderTrait, ValueBuilderTrait}, - session::SessionDeclarations, - }; - use zenoh_config::Config; - use zenoh_core::SyncResolve; - + use zenoh::prelude::sync::*; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_queryable("test/attachment") diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index f510e4f54a..d99017ff43 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,5 +1,17 @@ -use zenoh_config::{Config, ConnectionRetryConf, EndPoint, ValidatedMap}; -use zenoh_core::SyncResolve; +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::sync::*; #[test] fn retry_config_overriding() { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 3069e53e24..b659b462df 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,12 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::query::Reply; -use zenoh::sample::SampleKind; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh_config::peer; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::prelude::r#async::*; +use zenoh::internal::ztimeout; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 82030daef5..4f8be094d2 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,6 +1,3 @@ -use zenoh::{sample::ValueBuilderTrait, session::SessionDeclarations}; -use zenoh_config::Config; - // // Copyright (c) 2024 ZettaScale Technology // @@ -14,12 +11,11 @@ use zenoh_config::Config; // Contributors: // ZettaScale Zenoh Team, // +use std::{thread, time::Duration}; +use zenoh::prelude::sync::*; + #[test] fn pubsub_with_ringbuffer() { - use std::{thread, time::Duration}; - use zenoh::handlers::RingBuffer; - use zenoh_core::SyncResolve; - let zenoh = zenoh::open(Config::default()).res().unwrap(); let sub = zenoh .declare_subscriber("test/ringbuffer") @@ -50,9 +46,6 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - use zenoh::handlers::RingBuffer; - use zenoh_core::SyncResolve; - let zenoh = zenoh::open(Config::default()).res().unwrap(); let queryable = zenoh .declare_queryable("test/ringbuffer_query") diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index c20dcafdb7..036dcd8e2a 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,10 +12,8 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; -use 
zenoh::session::SessionDeclarations; -use zenoh_config::{Config, ValidatedMap}; -use zenoh_core::zlock; -use zenoh_core::SyncResolve; +use zenoh::internal::zlock; +use zenoh::prelude::sync::*; struct IntervalCounter { first_tick: bool, diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 43dfd37281..79f0e277be 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -11,18 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::time::Duration; -use zenoh_config as config; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; - -const TIMEOUT: Duration = Duration::from_secs(60); -const SLEEP: Duration = Duration::from_secs(1); - #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use zenoh::{sample::SampleKind, session::SessionDeclarations}; + use {std::time::Duration, zenoh::internal::ztimeout, zenoh::prelude::*}; + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); let mut c1 = config::peer(); c1.listen diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 341f66bba7..4e838f98a1 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -11,17 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::str::FromStr; -use std::time::Duration; -use zenoh::config::Locator; -use zenoh::session::Session; -use zenoh_config as config; -use zenoh_config::peer; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; -use zenoh_result::ZResult as Result; +#[cfg(feature = "unstable")] +use { + flume::RecvTimeoutError, std::str::FromStr, std::time::Duration, zenoh::internal::ztimeout, + zenoh::prelude::*, +}; +#[cfg(feature = "unstable")] const TIMEOUT: Duration = Duration::from_secs(60); +#[cfg(feature = "unstable")] const RECV_TIMEOUT: Duration = Duration::from_secs(1); #[cfg(feature = "unstable")] @@ -44,10 +42,7 @@ async fn create_session_pair(locator: &str) -> (Session, 
Session) { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_any() -> Result<()> { - use flume::RecvTimeoutError; - use zenoh::{sample::Locality, session::SessionDeclarations}; - +async fn zenoh_matching_status_any() -> ZResult<()> { let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; let publisher1 = ztimeout!(session1 @@ -106,9 +101,7 @@ async fn zenoh_matching_status_any() -> Result<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_remote() -> Result<()> { - use flume::RecvTimeoutError; - use zenoh::{sample::Locality, session::SessionDeclarations}; +async fn zenoh_matching_status_remote() -> ZResult<()> { let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); @@ -170,10 +163,7 @@ async fn zenoh_matching_status_remote() -> Result<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_local() -> Result<()> { - use flume::RecvTimeoutError; - use zenoh::{sample::Locality, session::SessionDeclarations}; - +async fn zenoh_matching_status_local() -> ZResult<()> { let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 5e3f507006..3c0cfb0b37 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,11 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::{publication::Priority, session::SessionDeclarations}; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: 
Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index f79da05483..07af6a8840 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,15 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::config::{Config, ModeDependentValue}; +use zenoh::internal::{bail, ztimeout}; +use zenoh::prelude::r#async::*; use zenoh::core::Result; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; -use zenoh_result::bail; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index b325e7601b..724d48eef1 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -14,15 +14,8 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; -use zenoh::key_expr::KeyExpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::{QoSBuilderTrait, SampleKind}; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh::subscriber::Reliability; -use zenoh::value::Value; -use zenoh_config as config; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 8ad80b7315..d54186050c 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,19 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use config::WhatAmI; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; -use 
zenoh::config::EndPoint; -use zenoh::key_expr::KeyExpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh_config as config; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 182eb3a7d9a0422f6d67a1bf0696468d1a486b9a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 17:54:12 +0200 Subject: [PATCH 175/598] QoS type leak fixed --- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/sample.rs | 23 ++++++++++++++++------- zenoh/src/lib.rs | 4 ++-- zenoh/src/prelude.rs | 2 -- zenoh/tests/qos.rs | 10 ++++------ 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 2af1a0a71c..ee0f716800 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -145,7 +145,7 @@ impl SampleBuilder { } // Allows to change qos as a whole of [`Sample`] - pub fn qos(self, qos: QoS) -> Self { + pub(crate) fn qos(self, qos: QoS) -> Self { Self { sample: Sample { qos, ..self.sample }, _t: PhantomData::, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 4321ac6d6c..f2ff96fb04 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -546,16 +546,25 @@ impl Sample { &self.encoding } - /// Gets the timestamp of this Sample. + /// Gets the timestamp of this Sample #[inline] pub fn timestamp(&self) -> Option<&Timestamp> { self.timestamp.as_ref() } - /// Gets the quality of service settings this Sample was sent with. 
- #[inline] - pub fn qos(&self) -> &QoS { - &self.qos + /// Gets the congetion control of this Sample + pub fn congestion_control(&self) -> CongestionControl { + self.qos.congestion_control() + } + + /// Gets the priority of this Sample + pub fn priority(&self) -> Priority { + self.qos.priority() + } + + /// Gets the express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. + pub fn express(&self) -> bool { + self.qos.express() } /// Gets infos on the source of this Sample. @@ -581,12 +590,12 @@ impl From for Value { /// Structure containing quality of service data #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct QoS { +pub(crate) struct QoS { inner: QoSType, } #[derive(Debug)] -pub struct QoSBuilder(QoS); +pub(crate) struct QoSBuilder(QoS); impl From for QoSBuilder { fn from(qos: QoS) -> Self { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 6c139e4b24..f071360567 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -105,9 +105,9 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); -// Reexport some functions directly to root `zenoh::`` namespace for convenience +// Expose some functions directly to root `zenoh::`` namespace for convenience pub use crate::api::session::open; -pub use crate::scouting::scout; +pub use crate::api::scouting::scout; pub mod prelude; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d204aeabdc..e89542122d 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -44,7 +44,6 @@ pub(crate) mod flat { pub use crate::handlers::*; pub use crate::key_expr::*; pub use crate::payload::*; - pub use crate::plugins::*; pub use crate::publication::*; pub use crate::query::*; pub use crate::queryable::*; @@ -68,7 +67,6 @@ pub(crate) mod mods { pub use crate::handlers; pub use crate::key_expr; pub use crate::payload; - pub use crate::plugins; pub use crate::publication; pub use crate::query; pub use crate::queryable; diff --git 
a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 3c0cfb0b37..b70d01ec79 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -42,15 +42,13 @@ async fn pubsub() { ztimeout!(publisher1.put("qos").res_async()).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); - let qos = sample.qos(); - assert_eq!(qos.priority(), Priority::DataHigh); - assert_eq!(qos.congestion_control(), CongestionControl::Drop); + assert_eq!(sample.priority(), Priority::DataHigh); + assert_eq!(sample.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); - let qos = sample.qos(); - assert_eq!(qos.priority(), Priority::DataLow); - assert_eq!(qos.congestion_control(), CongestionControl::Block); + assert_eq!(sample.priority(), Priority::DataLow); + assert_eq!(sample.congestion_control(), CongestionControl::Block); } From 5aedd2c09219073895ab1fcd105f2fb2b05a5d86 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 8 Apr 2024 18:01:43 +0200 Subject: [PATCH 176/598] Payload tuple generic impl --- Cargo.lock | 7 + Cargo.toml | 1 + zenoh/Cargo.toml | 1 + zenoh/src/payload.rs | 1143 ++++++++++++++++++++++++++++++++++++--- zenoh/src/queryable.rs | 1 + zenoh/src/session.rs | 13 +- zenoh/src/subscriber.rs | 3 - 7 files changed, 1093 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3ea8978b5..3f74af9ed1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3954,6 +3954,12 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "unwrap-infallible" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "151ac09978d3c2862c4e39b557f4eceee2cc72150bc4cb4f16abf061b6e381fb" + [[package]] name = "unzip-n" version = "0.1.2" @@ -4479,6 +4485,7 @@ dependencies = [ "tokio", "tokio-util", "uhlc", + 
"unwrap-infallible", "uuid", "vec_map", "zenoh-buffers", diff --git a/Cargo.toml b/Cargo.toml index 9210c96b70..d02f84eca8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,6 +159,7 @@ tokio-rustls = "0.25.0" console-subscriber = "0.2" typenum = "1.16.0" uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates +unwrap-infallible = "0.1.5" unzip-n = "0.1.2" url = "2.3.1" urlencoding = "2.1.2" diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index d20a4b914e..80cf8ba1bc 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -90,6 +90,7 @@ serde_yaml = { workspace = true } socket2 = { workspace = true } stop-token = { workspace = true } uhlc = { workspace = true, features = ["default"] } +unwrap-infallible = { workspace = true } uuid = { workspace = true, features = ["default"] } vec_map = { workspace = true } zenoh-buffers = { workspace = true, features = ["std"] } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index ed2a58145c..db3126d93d 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,17 +14,38 @@ //! Payload primitives. use crate::buffers::ZBuf; +use std::marker::PhantomData; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, }; -use zenoh_buffers::buffer::Buffer; +use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ - buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZBufReader, ZSlice, + buffer::{Buffer, SplitBuffer}, + reader::{HasReader, Reader}, + writer::HasWriter, + ZBufReader, ZSlice, }; -use zenoh_result::ZResult; +use zenoh_codec::{RCodec, WCodec, Zenoh080}; +use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; +/// Trait to encode a type `T` into a [`Value`]. +pub trait Serialize { + type Output; + + /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. 
+ fn serialize(self, t: T) -> Self::Output; +} + +pub trait Deserialize<'a, T> { + type Error; + + /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. + fn deserialize(self, t: &'a Payload) -> Result; +} + +/// A payload contains the serialized bytes of user data. #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Payload(ZBuf); @@ -57,14 +78,17 @@ impl Payload { pub fn reader(&self) -> PayloadReader<'_> { PayloadReader(self.0.reader()) } -} - -/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. -pub struct PayloadReader<'a>(ZBufReader<'a>); -impl std::io::Read for PayloadReader<'_> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - self.0.read(buf) + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. + pub fn iter(&self) -> PayloadIterator<'_, T> + where + T: TryFrom, + ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, + { + PayloadIterator { + reader: self.0.reader(), + _t: PhantomData::, + } } } @@ -99,19 +123,45 @@ impl Payload { } } -/// Trait to encode a type `T` into a [`Value`]. -pub trait Serialize { - type Output; +/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +pub struct PayloadReader<'a>(ZBufReader<'a>); - /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. - fn serialize(self, t: T) -> Self::Output; +impl std::io::Read for PayloadReader<'_> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + std::io::Read::read(&mut self.0, buf) + } } -pub trait Deserialize<'a, T> { - type Error; +/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. +/// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. 
+pub struct PayloadIterator<'a, T> +where + ZSerde: Deserialize<'a, T>, +{ + reader: ZBufReader<'a>, + _t: PhantomData, +} - /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a Payload) -> Result; +impl<'a, T> Iterator for PayloadIterator<'a, T> +where + ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, +{ + type Item = T; + + fn next(&mut self) -> Option { + let codec = Zenoh080::new(); + + let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; + let kpld = Payload::new(kbuf); + + let t = ZSerde.deserialize(&kpld).ok()?; + Some(t) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.reader.remaining(); + (remaining, Some(remaining)) + } } /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. @@ -122,7 +172,7 @@ pub struct ZSerde; #[derive(Debug, Clone, Copy)] pub struct ZDeserializeError; -// Bytes +// ZBuf impl Serialize for ZSerde { type Output = Payload; @@ -131,9 +181,9 @@ impl Serialize for ZSerde { } } -impl From for ZBuf { - fn from(value: Payload) -> Self { - value.0 +impl From for Payload { + fn from(t: ZBuf) -> Self { + ZSerde.serialize(t) } } @@ -141,16 +191,23 @@ impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result { - Ok(v.into()) + Ok(v.0.clone()) + } +} + +impl From for ZBuf { + fn from(value: Payload) -> Self { + value.0 } } impl From<&Payload> for ZBuf { fn from(value: &Payload) -> Self { - value.0.clone() + ZSerde.deserialize(value).unwrap_infallible() } } +// Vec impl Serialize> for ZSerde { type Output = Payload; @@ -159,11 +216,9 @@ impl Serialize> for ZSerde { } } -impl Serialize<&[u8]> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &[u8]) -> Self::Output { - Payload::new(t.to_vec()) +impl From> for Payload { + fn from(t: Vec) -> Self { + ZSerde.serialize(t) } } @@ -171,16 +226,38 @@ impl 
Deserialize<'_, Vec> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result, Self::Error> { - Ok(Vec::from(v)) + Ok(v.0.contiguous().to_vec()) + } +} + +impl From for Vec { + fn from(value: Payload) -> Self { + ZSerde.deserialize(&value).unwrap_infallible() } } impl From<&Payload> for Vec { fn from(value: &Payload) -> Self { - Cow::from(value).to_vec() + ZSerde.deserialize(value).unwrap_infallible() } } +// &[u8] +impl Serialize<&[u8]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &[u8]) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl From<&[u8]> for Payload { + fn from(t: &[u8]) -> Self { + ZSerde.serialize(t) + } +} + +// Cow<[u8]> impl<'a> Serialize> for ZSerde { type Output = Payload; @@ -189,6 +266,12 @@ impl<'a> Serialize> for ZSerde { } } +impl From> for Payload { + fn from(t: Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; @@ -199,7 +282,7 @@ impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { impl<'a> From<&'a Payload> for Cow<'a, [u8]> { fn from(value: &'a Payload) -> Self { - value.0.contiguous() + ZSerde.deserialize(value).unwrap_infallible() } } @@ -212,11 +295,9 @@ impl Serialize for ZSerde { } } -impl Serialize<&str> for ZSerde { - type Output = Payload; - - fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) +impl From for Payload { + fn from(t: String) -> Self { + ZSerde.serialize(t) } } @@ -224,7 +305,16 @@ impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; fn deserialize(self, v: &Payload) -> Result { - String::from_utf8(Vec::from(v)) + let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); + String::from_utf8(v) + } +} + +impl TryFrom for String { + type Error = FromUtf8Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) } } @@ -236,14 +326,22 @@ impl TryFrom<&Payload> for String { } } -impl TryFrom for String { - type Error = 
FromUtf8Error; +// &str +impl Serialize<&str> for ZSerde { + type Output = Payload; - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) + fn serialize(self, s: &str) -> Self::Output { + Self.serialize(s.to_string()) } } +impl From<&str> for Payload { + fn from(t: &str) -> Self { + ZSerde.serialize(t) + } +} + +// Cow impl<'a> Serialize> for ZSerde { type Output = Payload; @@ -252,6 +350,12 @@ impl<'a> Serialize> for ZSerde { } } +impl From> for Payload { + fn from(t: Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = FromUtf8Error; @@ -277,7 +381,11 @@ macro_rules! impl_int { fn serialize(self, t: $t) -> Self::Output { let bs = t.to_le_bytes(); - let end = 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); + let end = if t == 0 as $t { + 0 + } else { + 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1) + }; // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 // - end is a valid end index because is bounded between 0 and bs.len() @@ -285,6 +393,12 @@ macro_rules! impl_int { } } + impl From<$t> for Payload { + fn from(t: $t) -> Self { + ZSerde.serialize(t) + } + } + impl Serialize<&$t> for ZSerde { type Output = Payload; @@ -293,11 +407,23 @@ macro_rules! impl_int { } } + impl From<&$t> for Payload { + fn from(t: &$t) -> Self { + ZSerde.serialize(t) + } + } + impl Serialize<&mut $t> for ZSerde { type Output = Payload; fn serialize(self, t: &mut $t) -> Self::Output { - Self.serialize(*t) + ZSerde.serialize(*t) + } + } + + impl From<&mut $t> for Payload { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) } } @@ -319,6 +445,14 @@ macro_rules! 
impl_int { } } + impl TryFrom for $t { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } + } + impl TryFrom<&Payload> for $t { type Error = ZDeserializeError; @@ -349,12 +483,18 @@ impl_int!(f64, ZSerde::ZENOH_FLOAT); // Zenoh bool impl Serialize for ZSerde { - type Output = ZBuf; + type Output = Payload; fn serialize(self, t: bool) -> Self::Output { // SAFETY: casting a bool into an integer is well-defined behaviour. // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - ZBuf::from((t as u8).to_le_bytes()) + Payload::new(ZBuf::from((t as u8).to_le_bytes())) + } +} + +impl From for Payload { + fn from(t: bool) -> Self { + ZSerde.serialize(t) } } @@ -391,6 +531,14 @@ impl Serialize<&serde_json::Value> for ZSerde { } } +impl TryFrom<&serde_json::Value> for Payload { + type Error = serde_json::Error; + + fn try_from(value: &serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -399,6 +547,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_json::Error; + + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_json::Value> for ZSerde { type Error = serde_json::Error; @@ -407,11 +563,11 @@ impl Deserialize<'_, serde_json::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_json::Value { type Error = serde_json::Error; - fn try_from(value: serde_json::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -426,6 +582,14 @@ impl Serialize<&serde_yaml::Value> for ZSerde { } } +impl TryFrom<&serde_yaml::Value> for Payload { + type Error = serde_yaml::Error; + + fn try_from(value: &serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -434,6 +598,14 @@ impl Serialize for 
ZSerde { } } +impl TryFrom for Payload { + type Error = serde_yaml::Error; + + fn try_from(value: serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_yaml::Value> for ZSerde { type Error = serde_yaml::Error; @@ -442,11 +614,11 @@ impl Deserialize<'_, serde_yaml::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_yaml::Value { type Error = serde_yaml::Error; - fn try_from(value: serde_yaml::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -461,6 +633,14 @@ impl Serialize<&serde_cbor::Value> for ZSerde { } } +impl TryFrom<&serde_cbor::Value> for Payload { + type Error = serde_cbor::Error; + + fn try_from(value: &serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -469,6 +649,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_cbor::Error; + + fn try_from(value: serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_cbor::Value> for ZSerde { type Error = serde_cbor::Error; @@ -477,11 +665,11 @@ impl Deserialize<'_, serde_cbor::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_cbor::Value { type Error = serde_cbor::Error; - fn try_from(value: serde_cbor::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -500,6 +688,14 @@ impl Serialize<&serde_pickle::Value> for ZSerde { } } +impl TryFrom<&serde_pickle::Value> for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: &serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -508,6 +704,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + 
ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; @@ -516,11 +720,11 @@ impl Deserialize<'_, serde_pickle::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_pickle::Value { type Error = serde_pickle::Error; - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -553,15 +757,86 @@ impl Serialize for ZSerde { } } -impl From for Payload +// Tuple +impl Serialize<(A, B)> for ZSerde where - ZSerde: Serialize, + A: Into, + B: Into, { - fn from(t: T) -> Self { - ZSerde.serialize(t) + type Output = Payload; + + fn serialize(self, t: (A, B)) -> Self::Output { + let (a, b) = t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: Payload = a.into(); + let bpld: Payload = b.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. 
+ unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + } + + Payload::new(buffer) + } +} + +impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZError; + + fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = payload.0.reader(); + + let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let apld = Payload::new(abuf); + + let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let bpld = Payload::new(bbuf); + + let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; + Ok((a, b)) } } +// Iterator +// impl Serialize for ZSerde +// where +// I: Iterator, +// T: Into, +// { +// type Output = Payload; + +// fn serialize(self, iter: I) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld: Payload = t.into(); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. 
+// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } + // For convenience to always convert a Value the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -619,14 +894,18 @@ mod tests { ($t:ty, $in:expr) => { let i = $in; let t = i.clone(); + println!("Serialize:\t{:?}", t); let v = Payload::serialize(t); + println!("Deserialize:\t{:?}", v); let o: $t = v.deserialize().unwrap(); - assert_eq!(i, o) + assert_eq!(i, o); + println!(""); }; } let mut rng = rand::thread_rng(); + // unsigned integer serialize_deserialize!(u8, u8::MIN); serialize_deserialize!(u16, u16::MIN); serialize_deserialize!(u32, u32::MIN); @@ -647,6 +926,7 @@ mod tests { serialize_deserialize!(usize, rng.gen::()); } + // signed integer serialize_deserialize!(i8, i8::MIN); serialize_deserialize!(i16, i16::MIN); serialize_deserialize!(i32, i32::MIN); @@ -667,6 +947,7 @@ mod tests { serialize_deserialize!(isize, rng.gen::()); } + // float serialize_deserialize!(f32, f32::MIN); serialize_deserialize!(f64, f64::MIN); @@ -678,13 +959,747 @@ mod tests { serialize_deserialize!(f64, rng.gen::()); } + // String serialize_deserialize!(String, ""); serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + // Vec serialize_deserialize!(Vec, vec![0u8; 0]); serialize_deserialize!(Vec, vec![0u8; 64]); + // ZBuf serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + + // Tuple + serialize_deserialize!((usize, usize), (0, 1)); + serialize_deserialize!((usize, String), (0, String::from("a"))); + serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + + // Iterator + // let mut hm = Vec::new(); + // hm.push(0); + // hm.push(1); + // Payload::serialize(hm.iter()); + + // let mut hm = HashMap::new(); + // hm.insert(0, 0); + // hm.insert(1, 1); + // Payload::serialize(hm.iter().map(|(k, v)| (k, v))); + // for (k, v) in 
sample.payload().iter::<(String, serde_json::Value)>() {} } } + +// macro_rules! impl_iterator_inner { +// ($iter:expr) => {{ +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in $iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// }}; +// } + +// impl<'a> Serialize> for ZSerde { +// type Output = Payload; + +// fn serialize(self, iter: std::slice::Iter<'_, i32>) -> Self::Output { +// impl_iterator_inner!(iter) +// } +// } + +// impl<'a> Serialize> for ZSerde { +// type Output = Payload; + +// fn serialize(self, iter: std::slice::IterMut<'_, i32>) -> Self::Output { +// impl_iterator_inner!(iter) +// } +// } + +// impl Serialize<&mut dyn Iterator> for ZSerde { +// type Output = Payload; + +// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. 
+// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } + +// impl Serialize<(A, B)> for ZSerde +// where +// ZSerde: Serialize, +// ZSerde: Serialize, +// { +// type Output = Payload; + +// fn serialize(self, t: (A, B)) -> Self::Output { +// let (a, b) = t; + +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// let apld = Payload::serialize::(a); +// let bpld = Payload::serialize::(b); + +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &apld.0).unwrap_unchecked(); +// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); +// } + +// Payload::new(buffer) +// } +// } + +// impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde +// where +// A: TryFrom, +// ZSerde: Deserialize<'a, A>, +// >::Error: Debug, +// B: TryFrom, +// ZSerde: Deserialize<'a, B>, +// >::Error: Debug, +// { +// type Error = ZError; + +// fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { +// let codec = Zenoh080::new(); +// let mut reader = payload.0.reader(); + +// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let apld = Payload::new(abuf); + +// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let bpld = Payload::new(bbuf); + +// let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; +// let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; +// Ok((a, b)) +// } +// } + +// impl Serialize<&mut dyn Iterator> for ZSerde +// where +// ZSerde: Serialize, +// { +// type Output = Payload; + +// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// 
for t in iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } + +// Iterator +// macro_rules! impl_iterator_serialize { +// ($a:ty) => { +// impl Serialize<&mut dyn Iterator> for ZSerde +// { +// type Output = Payload; + +// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } +// }; +// } + +// Tuples +// macro_rules! impl_tuple_serialize { +// ($a:ty, $b:ty) => { +// impl Serialize<($a, $b)> for ZSerde +// { +// type Output = Payload; + +// fn serialize(self, t: ($a, $b)) -> Self::Output { +// let (a, b) = t; + +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// let apld = Payload::serialize::<$a>(a); +// let bpld = Payload::serialize::<$b>(b); + +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. 
+// unsafe { +// codec.write(&mut writer, &apld.0).unwrap_unchecked(); +// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); +// } + +// Payload::new(buffer) +// } +// } +// } + +// } + +// macro_rules! impl_tuple_deserialize { +// ($a:ty, $b:ty) => { +// impl<'a> Deserialize<'a, ($a, $b)> for ZSerde { +// type Error = ZError; + +// fn deserialize(self, payload: &'a Payload) -> Result<($a, $b), Self::Error> { +// let codec = Zenoh080::new(); +// let mut reader = payload.0.reader(); + +// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let apld = Payload::new(abuf); + +// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let bpld = Payload::new(bbuf); + +// let a = apld.deserialize::<$a>().map_err(|e| zerror!("{:?}", e))?; +// let b = bpld.deserialize::<$b>().map_err(|e| zerror!("{:?}", e))?; +// Ok((a, b)) +// } +// } +// }; +// } + +// impl_tuple_serialize!(u8, u8); +// impl_tuple_deserialize!(u8, u8); +// impl_tuple_serialize!(u8, u16); +// impl_tuple_deserialize!(u8, u16); +// impl_tuple_serialize!(u8, u32); +// impl_tuple_deserialize!(u8, u32); +// impl_tuple_serialize!(u8, u64); +// impl_tuple_deserialize!(u8, u64); +// impl_tuple_serialize!(u8, usize); +// impl_tuple_deserialize!(u8, usize); +// impl_tuple_serialize!(u8, i8); +// impl_tuple_deserialize!(u8, i8); +// impl_tuple_serialize!(u8, i16); +// impl_tuple_deserialize!(u8, i16); +// impl_tuple_serialize!(u8, i32); +// impl_tuple_deserialize!(u8, i32); +// impl_tuple_serialize!(u8, isize); +// impl_tuple_deserialize!(u8, isize); +// impl_tuple_serialize!(u8, f32); +// impl_tuple_deserialize!(u8, f32); +// impl_tuple_serialize!(u8, f64); +// impl_tuple_deserialize!(u8, f64); +// impl_tuple_serialize!(u8, bool); +// impl_tuple_deserialize!(u8, bool); +// impl_tuple_serialize!(u8, ZBuf); +// impl_tuple_deserialize!(u8, ZBuf); +// impl_tuple_serialize!(u8, Vec); +// impl_tuple_deserialize!(u8, Vec); +// impl_tuple_serialize!(u8, String); +// 
impl_tuple_deserialize!(u8, String); +// impl_tuple_serialize!(u8, &[u8]); +// impl_tuple_serialize!(u16, u8); +// impl_tuple_deserialize!(u16, u8); +// impl_tuple_serialize!(u16, u16); +// impl_tuple_deserialize!(u16, u16); +// impl_tuple_serialize!(u16, u32); +// impl_tuple_deserialize!(u16, u32); +// impl_tuple_serialize!(u16, u64); +// impl_tuple_deserialize!(u16, u64); +// impl_tuple_serialize!(u16, usize); +// impl_tuple_deserialize!(u16, usize); +// impl_tuple_serialize!(u16, i8); +// impl_tuple_deserialize!(u16, i8); +// impl_tuple_serialize!(u16, i16); +// impl_tuple_deserialize!(u16, i16); +// impl_tuple_serialize!(u16, i32); +// impl_tuple_deserialize!(u16, i32); +// impl_tuple_serialize!(u16, isize); +// impl_tuple_deserialize!(u16, isize); +// impl_tuple_serialize!(u16, f32); +// impl_tuple_deserialize!(u16, f32); +// impl_tuple_serialize!(u16, f64); +// impl_tuple_deserialize!(u16, f64); +// impl_tuple_serialize!(u16, bool); +// impl_tuple_deserialize!(u16, bool); +// impl_tuple_serialize!(u16, ZBuf); +// impl_tuple_deserialize!(u16, ZBuf); +// impl_tuple_serialize!(u16, Vec); +// impl_tuple_deserialize!(u16, Vec); +// impl_tuple_serialize!(u16, String); +// impl_tuple_deserialize!(u16, String); +// impl_tuple_serialize!(u16, &[u8]); +// impl_tuple_serialize!(u32, u8); +// impl_tuple_deserialize!(u32, u8); +// impl_tuple_serialize!(u32, u16); +// impl_tuple_deserialize!(u32, u16); +// impl_tuple_serialize!(u32, u32); +// impl_tuple_deserialize!(u32, u32); +// impl_tuple_serialize!(u32, u64); +// impl_tuple_deserialize!(u32, u64); +// impl_tuple_serialize!(u32, usize); +// impl_tuple_deserialize!(u32, usize); +// impl_tuple_serialize!(u32, i8); +// impl_tuple_deserialize!(u32, i8); +// impl_tuple_serialize!(u32, i16); +// impl_tuple_deserialize!(u32, i16); +// impl_tuple_serialize!(u32, i32); +// impl_tuple_deserialize!(u32, i32); +// impl_tuple_serialize!(u32, isize); +// impl_tuple_deserialize!(u32, isize); +// impl_tuple_serialize!(u32, f32); +// 
impl_tuple_deserialize!(u32, f32); +// impl_tuple_serialize!(u32, f64); +// impl_tuple_deserialize!(u32, f64); +// impl_tuple_serialize!(u32, bool); +// impl_tuple_deserialize!(u32, bool); +// impl_tuple_serialize!(u32, ZBuf); +// impl_tuple_deserialize!(u32, ZBuf); +// impl_tuple_serialize!(u32, Vec); +// impl_tuple_deserialize!(u32, Vec); +// impl_tuple_serialize!(u32, String); +// impl_tuple_deserialize!(u32, String); +// impl_tuple_serialize!(u32, &[u8]); +// impl_tuple_serialize!(u64, u8); +// impl_tuple_deserialize!(u64, u8); +// impl_tuple_serialize!(u64, u16); +// impl_tuple_deserialize!(u64, u16); +// impl_tuple_serialize!(u64, u32); +// impl_tuple_deserialize!(u64, u32); +// impl_tuple_serialize!(u64, u64); +// impl_tuple_deserialize!(u64, u64); +// impl_tuple_serialize!(u64, usize); +// impl_tuple_deserialize!(u64, usize); +// impl_tuple_serialize!(u64, i8); +// impl_tuple_deserialize!(u64, i8); +// impl_tuple_serialize!(u64, i16); +// impl_tuple_deserialize!(u64, i16); +// impl_tuple_serialize!(u64, i32); +// impl_tuple_deserialize!(u64, i32); +// impl_tuple_serialize!(u64, isize); +// impl_tuple_deserialize!(u64, isize); +// impl_tuple_serialize!(u64, f32); +// impl_tuple_deserialize!(u64, f32); +// impl_tuple_serialize!(u64, f64); +// impl_tuple_deserialize!(u64, f64); +// impl_tuple_serialize!(u64, bool); +// impl_tuple_deserialize!(u64, bool); +// impl_tuple_serialize!(u64, ZBuf); +// impl_tuple_deserialize!(u64, ZBuf); +// impl_tuple_serialize!(u64, Vec); +// impl_tuple_deserialize!(u64, Vec); +// impl_tuple_serialize!(u64, String); +// impl_tuple_deserialize!(u64, String); +// impl_tuple_serialize!(u64, &[u8]); +// impl_tuple_serialize!(usize, u8); +// impl_tuple_deserialize!(usize, u8); +// impl_tuple_serialize!(usize, u16); +// impl_tuple_deserialize!(usize, u16); +// impl_tuple_serialize!(usize, u32); +// impl_tuple_deserialize!(usize, u32); +// impl_tuple_serialize!(usize, u64); +// impl_tuple_deserialize!(usize, u64); +// 
impl_tuple_serialize!(usize, usize); +// impl_tuple_deserialize!(usize, usize); +// impl_tuple_serialize!(usize, i8); +// impl_tuple_deserialize!(usize, i8); +// impl_tuple_serialize!(usize, i16); +// impl_tuple_deserialize!(usize, i16); +// impl_tuple_serialize!(usize, i32); +// impl_tuple_deserialize!(usize, i32); +// impl_tuple_serialize!(usize, isize); +// impl_tuple_deserialize!(usize, isize); +// impl_tuple_serialize!(usize, f32); +// impl_tuple_deserialize!(usize, f32); +// impl_tuple_serialize!(usize, f64); +// impl_tuple_deserialize!(usize, f64); +// impl_tuple_serialize!(usize, bool); +// impl_tuple_deserialize!(usize, bool); +// impl_tuple_serialize!(usize, ZBuf); +// impl_tuple_deserialize!(usize, ZBuf); +// impl_tuple_serialize!(usize, Vec); +// impl_tuple_deserialize!(usize, Vec); +// impl_tuple_serialize!(usize, String); +// impl_tuple_deserialize!(usize, String); +// impl_tuple_serialize!(usize, &[u8]); +// impl_tuple_serialize!(i8, u8); +// impl_tuple_deserialize!(i8, u8); +// impl_tuple_serialize!(i8, u16); +// impl_tuple_deserialize!(i8, u16); +// impl_tuple_serialize!(i8, u32); +// impl_tuple_deserialize!(i8, u32); +// impl_tuple_serialize!(i8, u64); +// impl_tuple_deserialize!(i8, u64); +// impl_tuple_serialize!(i8, usize); +// impl_tuple_deserialize!(i8, usize); +// impl_tuple_serialize!(i8, i8); +// impl_tuple_deserialize!(i8, i8); +// impl_tuple_serialize!(i8, i16); +// impl_tuple_deserialize!(i8, i16); +// impl_tuple_serialize!(i8, i32); +// impl_tuple_deserialize!(i8, i32); +// impl_tuple_serialize!(i8, isize); +// impl_tuple_deserialize!(i8, isize); +// impl_tuple_serialize!(i8, f32); +// impl_tuple_deserialize!(i8, f32); +// impl_tuple_serialize!(i8, f64); +// impl_tuple_deserialize!(i8, f64); +// impl_tuple_serialize!(i8, bool); +// impl_tuple_deserialize!(i8, bool); +// impl_tuple_serialize!(i8, ZBuf); +// impl_tuple_deserialize!(i8, ZBuf); +// impl_tuple_serialize!(i8, Vec); +// impl_tuple_deserialize!(i8, Vec); +// 
impl_tuple_serialize!(i8, String); +// impl_tuple_deserialize!(i8, String); +// impl_tuple_serialize!(i8, &[u8]); +// impl_tuple_serialize!(i16, u8); +// impl_tuple_deserialize!(i16, u8); +// impl_tuple_serialize!(i16, u16); +// impl_tuple_deserialize!(i16, u16); +// impl_tuple_serialize!(i16, u32); +// impl_tuple_deserialize!(i16, u32); +// impl_tuple_serialize!(i16, u64); +// impl_tuple_deserialize!(i16, u64); +// impl_tuple_serialize!(i16, usize); +// impl_tuple_deserialize!(i16, usize); +// impl_tuple_serialize!(i16, i8); +// impl_tuple_deserialize!(i16, i8); +// impl_tuple_serialize!(i16, i16); +// impl_tuple_deserialize!(i16, i16); +// impl_tuple_serialize!(i16, i32); +// impl_tuple_deserialize!(i16, i32); +// impl_tuple_serialize!(i16, isize); +// impl_tuple_deserialize!(i16, isize); +// impl_tuple_serialize!(i16, f32); +// impl_tuple_deserialize!(i16, f32); +// impl_tuple_serialize!(i16, f64); +// impl_tuple_deserialize!(i16, f64); +// impl_tuple_serialize!(i16, bool); +// impl_tuple_deserialize!(i16, bool); +// impl_tuple_serialize!(i16, ZBuf); +// impl_tuple_deserialize!(i16, ZBuf); +// impl_tuple_serialize!(i16, Vec); +// impl_tuple_deserialize!(i16, Vec); +// impl_tuple_serialize!(i16, String); +// impl_tuple_deserialize!(i16, String); +// impl_tuple_serialize!(i16, &[u8]); +// impl_tuple_serialize!(i32, u8); +// impl_tuple_deserialize!(i32, u8); +// impl_tuple_serialize!(i32, u16); +// impl_tuple_deserialize!(i32, u16); +// impl_tuple_serialize!(i32, u32); +// impl_tuple_deserialize!(i32, u32); +// impl_tuple_serialize!(i32, u64); +// impl_tuple_deserialize!(i32, u64); +// impl_tuple_serialize!(i32, usize); +// impl_tuple_deserialize!(i32, usize); +// impl_tuple_serialize!(i32, i8); +// impl_tuple_deserialize!(i32, i8); +// impl_tuple_serialize!(i32, i16); +// impl_tuple_deserialize!(i32, i16); +// impl_tuple_serialize!(i32, i32); +// impl_tuple_deserialize!(i32, i32); +// impl_tuple_serialize!(i32, isize); +// impl_tuple_deserialize!(i32, isize); +// 
impl_tuple_serialize!(i32, f32); +// impl_tuple_deserialize!(i32, f32); +// impl_tuple_serialize!(i32, f64); +// impl_tuple_deserialize!(i32, f64); +// impl_tuple_serialize!(i32, bool); +// impl_tuple_deserialize!(i32, bool); +// impl_tuple_serialize!(i32, ZBuf); +// impl_tuple_deserialize!(i32, ZBuf); +// impl_tuple_serialize!(i32, Vec); +// impl_tuple_deserialize!(i32, Vec); +// impl_tuple_serialize!(i32, String); +// impl_tuple_deserialize!(i32, String); +// impl_tuple_serialize!(i32, &[u8]); +// impl_tuple_serialize!(isize, u8); +// impl_tuple_deserialize!(isize, u8); +// impl_tuple_serialize!(isize, u16); +// impl_tuple_deserialize!(isize, u16); +// impl_tuple_serialize!(isize, u32); +// impl_tuple_deserialize!(isize, u32); +// impl_tuple_serialize!(isize, u64); +// impl_tuple_deserialize!(isize, u64); +// impl_tuple_serialize!(isize, usize); +// impl_tuple_deserialize!(isize, usize); +// impl_tuple_serialize!(isize, i8); +// impl_tuple_deserialize!(isize, i8); +// impl_tuple_serialize!(isize, i16); +// impl_tuple_deserialize!(isize, i16); +// impl_tuple_serialize!(isize, i32); +// impl_tuple_deserialize!(isize, i32); +// impl_tuple_serialize!(isize, isize); +// impl_tuple_deserialize!(isize, isize); +// impl_tuple_serialize!(isize, f32); +// impl_tuple_deserialize!(isize, f32); +// impl_tuple_serialize!(isize, f64); +// impl_tuple_deserialize!(isize, f64); +// impl_tuple_serialize!(isize, bool); +// impl_tuple_deserialize!(isize, bool); +// impl_tuple_serialize!(isize, ZBuf); +// impl_tuple_deserialize!(isize, ZBuf); +// impl_tuple_serialize!(isize, Vec); +// impl_tuple_deserialize!(isize, Vec); +// impl_tuple_serialize!(isize, String); +// impl_tuple_deserialize!(isize, String); +// impl_tuple_serialize!(isize, &[u8]); +// impl_tuple_serialize!(f32, u8); +// impl_tuple_deserialize!(f32, u8); +// impl_tuple_serialize!(f32, u16); +// impl_tuple_deserialize!(f32, u16); +// impl_tuple_serialize!(f32, u32); +// impl_tuple_deserialize!(f32, u32); +// 
impl_tuple_serialize!(f32, u64); +// impl_tuple_deserialize!(f32, u64); +// impl_tuple_serialize!(f32, usize); +// impl_tuple_deserialize!(f32, usize); +// impl_tuple_serialize!(f32, i8); +// impl_tuple_deserialize!(f32, i8); +// impl_tuple_serialize!(f32, i16); +// impl_tuple_deserialize!(f32, i16); +// impl_tuple_serialize!(f32, i32); +// impl_tuple_deserialize!(f32, i32); +// impl_tuple_serialize!(f32, isize); +// impl_tuple_deserialize!(f32, isize); +// impl_tuple_serialize!(f32, f32); +// impl_tuple_deserialize!(f32, f32); +// impl_tuple_serialize!(f32, f64); +// impl_tuple_deserialize!(f32, f64); +// impl_tuple_serialize!(f32, bool); +// impl_tuple_deserialize!(f32, bool); +// impl_tuple_serialize!(f32, ZBuf); +// impl_tuple_deserialize!(f32, ZBuf); +// impl_tuple_serialize!(f32, Vec); +// impl_tuple_deserialize!(f32, Vec); +// impl_tuple_serialize!(f32, String); +// impl_tuple_deserialize!(f32, String); +// impl_tuple_serialize!(f32, &[u8]); +// impl_tuple_serialize!(f64, u8); +// impl_tuple_deserialize!(f64, u8); +// impl_tuple_serialize!(f64, u16); +// impl_tuple_deserialize!(f64, u16); +// impl_tuple_serialize!(f64, u32); +// impl_tuple_deserialize!(f64, u32); +// impl_tuple_serialize!(f64, u64); +// impl_tuple_deserialize!(f64, u64); +// impl_tuple_serialize!(f64, usize); +// impl_tuple_deserialize!(f64, usize); +// impl_tuple_serialize!(f64, i8); +// impl_tuple_deserialize!(f64, i8); +// impl_tuple_serialize!(f64, i16); +// impl_tuple_deserialize!(f64, i16); +// impl_tuple_serialize!(f64, i32); +// impl_tuple_deserialize!(f64, i32); +// impl_tuple_serialize!(f64, isize); +// impl_tuple_deserialize!(f64, isize); +// impl_tuple_serialize!(f64, f32); +// impl_tuple_deserialize!(f64, f32); +// impl_tuple_serialize!(f64, f64); +// impl_tuple_deserialize!(f64, f64); +// impl_tuple_serialize!(f64, bool); +// impl_tuple_deserialize!(f64, bool); +// impl_tuple_serialize!(f64, ZBuf); +// impl_tuple_deserialize!(f64, ZBuf); +// impl_tuple_serialize!(f64, Vec); +// 
impl_tuple_deserialize!(f64, Vec); +// impl_tuple_serialize!(f64, String); +// impl_tuple_deserialize!(f64, String); +// impl_tuple_serialize!(f64, &[u8]); +// impl_tuple_serialize!(bool, u8); +// impl_tuple_deserialize!(bool, u8); +// impl_tuple_serialize!(bool, u16); +// impl_tuple_deserialize!(bool, u16); +// impl_tuple_serialize!(bool, u32); +// impl_tuple_deserialize!(bool, u32); +// impl_tuple_serialize!(bool, u64); +// impl_tuple_deserialize!(bool, u64); +// impl_tuple_serialize!(bool, usize); +// impl_tuple_deserialize!(bool, usize); +// impl_tuple_serialize!(bool, i8); +// impl_tuple_deserialize!(bool, i8); +// impl_tuple_serialize!(bool, i16); +// impl_tuple_deserialize!(bool, i16); +// impl_tuple_serialize!(bool, i32); +// impl_tuple_deserialize!(bool, i32); +// impl_tuple_serialize!(bool, isize); +// impl_tuple_deserialize!(bool, isize); +// impl_tuple_serialize!(bool, f32); +// impl_tuple_deserialize!(bool, f32); +// impl_tuple_serialize!(bool, f64); +// impl_tuple_deserialize!(bool, f64); +// impl_tuple_serialize!(bool, bool); +// impl_tuple_deserialize!(bool, bool); +// impl_tuple_serialize!(bool, ZBuf); +// impl_tuple_deserialize!(bool, ZBuf); +// impl_tuple_serialize!(bool, Vec); +// impl_tuple_deserialize!(bool, Vec); +// impl_tuple_serialize!(bool, String); +// impl_tuple_deserialize!(bool, String); +// impl_tuple_serialize!(bool, &[u8]); +// impl_tuple_serialize!(ZBuf, u8); +// impl_tuple_deserialize!(ZBuf, u8); +// impl_tuple_serialize!(ZBuf, u16); +// impl_tuple_deserialize!(ZBuf, u16); +// impl_tuple_serialize!(ZBuf, u32); +// impl_tuple_deserialize!(ZBuf, u32); +// impl_tuple_serialize!(ZBuf, u64); +// impl_tuple_deserialize!(ZBuf, u64); +// impl_tuple_serialize!(ZBuf, usize); +// impl_tuple_deserialize!(ZBuf, usize); +// impl_tuple_serialize!(ZBuf, i8); +// impl_tuple_deserialize!(ZBuf, i8); +// impl_tuple_serialize!(ZBuf, i16); +// impl_tuple_deserialize!(ZBuf, i16); +// impl_tuple_serialize!(ZBuf, i32); +// impl_tuple_deserialize!(ZBuf, 
i32); +// impl_tuple_serialize!(ZBuf, isize); +// impl_tuple_deserialize!(ZBuf, isize); +// impl_tuple_serialize!(ZBuf, f32); +// impl_tuple_deserialize!(ZBuf, f32); +// impl_tuple_serialize!(ZBuf, f64); +// impl_tuple_deserialize!(ZBuf, f64); +// impl_tuple_serialize!(ZBuf, bool); +// impl_tuple_deserialize!(ZBuf, bool); +// impl_tuple_serialize!(ZBuf, ZBuf); +// impl_tuple_deserialize!(ZBuf, ZBuf); +// impl_tuple_serialize!(ZBuf, Vec); +// impl_tuple_deserialize!(ZBuf, Vec); +// impl_tuple_serialize!(ZBuf, String); +// impl_tuple_deserialize!(ZBuf, String); +// impl_tuple_serialize!(ZBuf, &[u8]); +// impl_tuple_serialize!(Vec, u8); +// impl_tuple_deserialize!(Vec, u8); +// impl_tuple_serialize!(Vec, u16); +// impl_tuple_deserialize!(Vec, u16); +// impl_tuple_serialize!(Vec, u32); +// impl_tuple_deserialize!(Vec, u32); +// impl_tuple_serialize!(Vec, u64); +// impl_tuple_deserialize!(Vec, u64); +// impl_tuple_serialize!(Vec, usize); +// impl_tuple_deserialize!(Vec, usize); +// impl_tuple_serialize!(Vec, i8); +// impl_tuple_deserialize!(Vec, i8); +// impl_tuple_serialize!(Vec, i16); +// impl_tuple_deserialize!(Vec, i16); +// impl_tuple_serialize!(Vec, i32); +// impl_tuple_deserialize!(Vec, i32); +// impl_tuple_serialize!(Vec, isize); +// impl_tuple_deserialize!(Vec, isize); +// impl_tuple_serialize!(Vec, f32); +// impl_tuple_deserialize!(Vec, f32); +// impl_tuple_serialize!(Vec, f64); +// impl_tuple_deserialize!(Vec, f64); +// impl_tuple_serialize!(Vec, bool); +// impl_tuple_deserialize!(Vec, bool); +// impl_tuple_serialize!(Vec, ZBuf); +// impl_tuple_deserialize!(Vec, ZBuf); +// impl_tuple_serialize!(Vec, Vec); +// impl_tuple_deserialize!(Vec, Vec); +// impl_tuple_serialize!(Vec, String); +// impl_tuple_deserialize!(Vec, String); +// impl_tuple_serialize!(Vec, &[u8]); +// impl_tuple_serialize!(String, u8); +// impl_tuple_deserialize!(String, u8); +// impl_tuple_serialize!(String, u16); +// impl_tuple_deserialize!(String, u16); +// impl_tuple_serialize!(String, 
u32); +// impl_tuple_deserialize!(String, u32); +// impl_tuple_serialize!(String, u64); +// impl_tuple_deserialize!(String, u64); +// impl_tuple_serialize!(String, usize); +// impl_tuple_deserialize!(String, usize); +// impl_tuple_serialize!(String, i8); +// impl_tuple_deserialize!(String, i8); +// impl_tuple_serialize!(String, i16); +// impl_tuple_deserialize!(String, i16); +// impl_tuple_serialize!(String, i32); +// impl_tuple_deserialize!(String, i32); +// impl_tuple_serialize!(String, isize); +// impl_tuple_deserialize!(String, isize); +// impl_tuple_serialize!(String, f32); +// impl_tuple_deserialize!(String, f32); +// impl_tuple_serialize!(String, f64); +// impl_tuple_deserialize!(String, f64); +// impl_tuple_serialize!(String, bool); +// impl_tuple_deserialize!(String, bool); +// impl_tuple_serialize!(String, ZBuf); +// impl_tuple_deserialize!(String, ZBuf); +// impl_tuple_serialize!(String, Vec); +// impl_tuple_deserialize!(String, Vec); +// impl_tuple_serialize!(String, String); +// impl_tuple_deserialize!(String, String); +// impl_tuple_serialize!(String, &[u8]); +// impl_tuple_serialize!(&[u8], u8); +// impl_tuple_serialize!(&[u8], u16); +// impl_tuple_serialize!(&[u8], u32); +// impl_tuple_serialize!(&[u8], u64); +// impl_tuple_serialize!(&[u8], usize); +// impl_tuple_serialize!(&[u8], i8); +// impl_tuple_serialize!(&[u8], i16); +// impl_tuple_serialize!(&[u8], i32); +// impl_tuple_serialize!(&[u8], isize); +// impl_tuple_serialize!(&[u8], f32); +// impl_tuple_serialize!(&[u8], f64); +// impl_tuple_serialize!(&[u8], bool); +// impl_tuple_serialize!(&[u8], ZBuf); +// impl_tuple_serialize!(&[u8], Vec); +// impl_tuple_serialize!(&[u8], String); +// impl_tuple_serialize!(&[u8], &[u8]); +// impl_iterator_serialize!(u8); +// impl_iterator_serialize!(u16); +// impl_iterator_serialize!(u32); +// impl_iterator_serialize!(u64); +// impl_iterator_serialize!(usize); +// impl_iterator_serialize!(i8); +// impl_iterator_serialize!(i16); +// 
impl_iterator_serialize!(i32); +// impl_iterator_serialize!(isize); +// impl_iterator_serialize!(f32); +// impl_iterator_serialize!(f64); +// impl_iterator_serialize!(bool); +// impl_iterator_serialize!(ZBuf); +// impl_iterator_serialize!(Vec); +// impl_iterator_serialize!(String); +// impl_iterator_serialize!(&[u8]); diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 58589bfe8f..ae9119ac8a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,6 +19,7 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::QoS; +#[cfg(feature = "unstable")] use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 9bc6c9c331..0c1c193568 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -57,12 +57,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] -use zenoh_protocol::network::declare::SubscriberId; -use zenoh_protocol::network::AtomicRequestId; -use zenoh_protocol::network::RequestId; -use zenoh_protocol::zenoh::reply::ReplyBody; -use zenoh_protocol::zenoh::Del; -use zenoh_protocol::zenoh::Put; +use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -74,13 +69,13 @@ use zenoh_protocol::{ subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, request::{self, ext::TargetType, Request}, - Mapping, Push, Response, ResponseFinal, + AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, - PushBody, RequestBody, ResponseBody, + reply::ReplyBody, + Del, PushBody, Put, RequestBody, 
ResponseBody, }, }; use zenoh_result::ZResult; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..47d41ebb1f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -202,9 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] From 3dea601356c7fdb08f14c7ce6c94e732db5b1836 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 8 Apr 2024 18:33:22 +0200 Subject: [PATCH 177/598] Payload iter impl --- zenoh/src/payload.rs | 821 ++++--------------------------------------- 1 file changed, 67 insertions(+), 754 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index db3126d93d..ed8c1b98c3 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -80,10 +80,11 @@ impl Payload { } /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> PayloadIterator<'_, T> + pub fn iter<'a, T>(&'a self) -> PayloadIterator<'a, T> where T: TryFrom, - ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, + ZSerde: Deserialize<'a, T>, + >::Error: Debug, { PayloadIterator { reader: self.0.reader(), @@ -144,7 +145,8 @@ where impl<'a, T> Iterator for PayloadIterator<'a, T> where - ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, + ZSerde: for<'b> Deserialize<'b, T>, + >::Error: Debug, { type Item = T; @@ -164,6 +166,28 @@ where } } +impl FromIterator for Payload +where + ZSerde: Serialize, +{ + fn from_iter>(iter: T) -> Self { + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + for t in iter { + let tpld = ZSerde.serialize(t); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. 
In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &tpld.0).unwrap_unchecked(); + } + } + + Payload::new(buffer) + } +} + /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. /// It also supports common Rust serde values. #[derive(Clone, Copy, Debug)] @@ -786,6 +810,16 @@ where } } +impl From<(A, B)> for Payload +where + A: Into, + B: Into, +{ + fn from(value: (A, B)) -> Self { + ZSerde.serialize(value) + } +} + impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde where A: TryFrom, @@ -811,31 +845,19 @@ where } } -// Iterator -// impl Serialize for ZSerde -// where -// I: Iterator, -// T: Into, -// { -// type Output = Payload; - -// fn serialize(self, iter: I) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld: Payload = t.into(); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. 
-// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } +impl TryFrom for (A, B) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} // For convenience to always convert a Value the examples #[derive(Debug, Clone, PartialEq, Eq)] @@ -977,729 +999,20 @@ mod tests { serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); // Iterator - // let mut hm = Vec::new(); - // hm.push(0); - // hm.push(1); - // Payload::serialize(hm.iter()); - - // let mut hm = HashMap::new(); - // hm.insert(0, 0); - // hm.insert(1, 1); - // Payload::serialize(hm.iter().map(|(k, v)| (k, v))); - // for (k, v) in sample.payload().iter::<(String, serde_json::Value)>() {} - } -} - -// macro_rules! impl_iterator_inner { -// ($iter:expr) => {{ -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in $iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. 
-// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// }}; -// } - -// impl<'a> Serialize> for ZSerde { -// type Output = Payload; - -// fn serialize(self, iter: std::slice::Iter<'_, i32>) -> Self::Output { -// impl_iterator_inner!(iter) -// } -// } - -// impl<'a> Serialize> for ZSerde { -// type Output = Payload; - -// fn serialize(self, iter: std::slice::IterMut<'_, i32>) -> Self::Output { -// impl_iterator_inner!(iter) -// } -// } - -// impl Serialize<&mut dyn Iterator> for ZSerde { -// type Output = Payload; - -// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } - -// impl Serialize<(A, B)> for ZSerde -// where -// ZSerde: Serialize, -// ZSerde: Serialize, -// { -// type Output = Payload; - -// fn serialize(self, t: (A, B)) -> Self::Output { -// let (a, b) = t; - -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// let apld = Payload::serialize::(a); -// let bpld = Payload::serialize::(b); - -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. 
-// unsafe { -// codec.write(&mut writer, &apld.0).unwrap_unchecked(); -// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); -// } - -// Payload::new(buffer) -// } -// } - -// impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde -// where -// A: TryFrom, -// ZSerde: Deserialize<'a, A>, -// >::Error: Debug, -// B: TryFrom, -// ZSerde: Deserialize<'a, B>, -// >::Error: Debug, -// { -// type Error = ZError; - -// fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { -// let codec = Zenoh080::new(); -// let mut reader = payload.0.reader(); - -// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let apld = Payload::new(abuf); - -// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let bpld = Payload::new(bbuf); - -// let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; -// let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; -// Ok((a, b)) -// } -// } - -// impl Serialize<&mut dyn Iterator> for ZSerde -// where -// ZSerde: Serialize, -// { -// type Output = Payload; - -// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } - -// Iterator -// macro_rules! 
impl_iterator_serialize { -// ($a:ty) => { -// impl Serialize<&mut dyn Iterator> for ZSerde -// { -// type Output = Payload; - -// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } -// }; -// } - -// Tuples -// macro_rules! impl_tuple_serialize { -// ($a:ty, $b:ty) => { -// impl Serialize<($a, $b)> for ZSerde -// { -// type Output = Payload; - -// fn serialize(self, t: ($a, $b)) -> Self::Output { -// let (a, b) = t; - -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// let apld = Payload::serialize::<$a>(a); -// let bpld = Payload::serialize::<$b>(b); - -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &apld.0).unwrap_unchecked(); -// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); -// } - -// Payload::new(buffer) -// } -// } -// } - -// } - -// macro_rules! 
impl_tuple_deserialize { -// ($a:ty, $b:ty) => { -// impl<'a> Deserialize<'a, ($a, $b)> for ZSerde { -// type Error = ZError; - -// fn deserialize(self, payload: &'a Payload) -> Result<($a, $b), Self::Error> { -// let codec = Zenoh080::new(); -// let mut reader = payload.0.reader(); - -// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let apld = Payload::new(abuf); - -// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let bpld = Payload::new(bbuf); - -// let a = apld.deserialize::<$a>().map_err(|e| zerror!("{:?}", e))?; -// let b = bpld.deserialize::<$b>().map_err(|e| zerror!("{:?}", e))?; -// Ok((a, b)) -// } -// } -// }; -// } - -// impl_tuple_serialize!(u8, u8); -// impl_tuple_deserialize!(u8, u8); -// impl_tuple_serialize!(u8, u16); -// impl_tuple_deserialize!(u8, u16); -// impl_tuple_serialize!(u8, u32); -// impl_tuple_deserialize!(u8, u32); -// impl_tuple_serialize!(u8, u64); -// impl_tuple_deserialize!(u8, u64); -// impl_tuple_serialize!(u8, usize); -// impl_tuple_deserialize!(u8, usize); -// impl_tuple_serialize!(u8, i8); -// impl_tuple_deserialize!(u8, i8); -// impl_tuple_serialize!(u8, i16); -// impl_tuple_deserialize!(u8, i16); -// impl_tuple_serialize!(u8, i32); -// impl_tuple_deserialize!(u8, i32); -// impl_tuple_serialize!(u8, isize); -// impl_tuple_deserialize!(u8, isize); -// impl_tuple_serialize!(u8, f32); -// impl_tuple_deserialize!(u8, f32); -// impl_tuple_serialize!(u8, f64); -// impl_tuple_deserialize!(u8, f64); -// impl_tuple_serialize!(u8, bool); -// impl_tuple_deserialize!(u8, bool); -// impl_tuple_serialize!(u8, ZBuf); -// impl_tuple_deserialize!(u8, ZBuf); -// impl_tuple_serialize!(u8, Vec); -// impl_tuple_deserialize!(u8, Vec); -// impl_tuple_serialize!(u8, String); -// impl_tuple_deserialize!(u8, String); -// impl_tuple_serialize!(u8, &[u8]); -// impl_tuple_serialize!(u16, u8); -// impl_tuple_deserialize!(u16, u8); -// impl_tuple_serialize!(u16, u16); -// 
impl_tuple_deserialize!(u16, u16); -// impl_tuple_serialize!(u16, u32); -// impl_tuple_deserialize!(u16, u32); -// impl_tuple_serialize!(u16, u64); -// impl_tuple_deserialize!(u16, u64); -// impl_tuple_serialize!(u16, usize); -// impl_tuple_deserialize!(u16, usize); -// impl_tuple_serialize!(u16, i8); -// impl_tuple_deserialize!(u16, i8); -// impl_tuple_serialize!(u16, i16); -// impl_tuple_deserialize!(u16, i16); -// impl_tuple_serialize!(u16, i32); -// impl_tuple_deserialize!(u16, i32); -// impl_tuple_serialize!(u16, isize); -// impl_tuple_deserialize!(u16, isize); -// impl_tuple_serialize!(u16, f32); -// impl_tuple_deserialize!(u16, f32); -// impl_tuple_serialize!(u16, f64); -// impl_tuple_deserialize!(u16, f64); -// impl_tuple_serialize!(u16, bool); -// impl_tuple_deserialize!(u16, bool); -// impl_tuple_serialize!(u16, ZBuf); -// impl_tuple_deserialize!(u16, ZBuf); -// impl_tuple_serialize!(u16, Vec); -// impl_tuple_deserialize!(u16, Vec); -// impl_tuple_serialize!(u16, String); -// impl_tuple_deserialize!(u16, String); -// impl_tuple_serialize!(u16, &[u8]); -// impl_tuple_serialize!(u32, u8); -// impl_tuple_deserialize!(u32, u8); -// impl_tuple_serialize!(u32, u16); -// impl_tuple_deserialize!(u32, u16); -// impl_tuple_serialize!(u32, u32); -// impl_tuple_deserialize!(u32, u32); -// impl_tuple_serialize!(u32, u64); -// impl_tuple_deserialize!(u32, u64); -// impl_tuple_serialize!(u32, usize); -// impl_tuple_deserialize!(u32, usize); -// impl_tuple_serialize!(u32, i8); -// impl_tuple_deserialize!(u32, i8); -// impl_tuple_serialize!(u32, i16); -// impl_tuple_deserialize!(u32, i16); -// impl_tuple_serialize!(u32, i32); -// impl_tuple_deserialize!(u32, i32); -// impl_tuple_serialize!(u32, isize); -// impl_tuple_deserialize!(u32, isize); -// impl_tuple_serialize!(u32, f32); -// impl_tuple_deserialize!(u32, f32); -// impl_tuple_serialize!(u32, f64); -// impl_tuple_deserialize!(u32, f64); -// impl_tuple_serialize!(u32, bool); -// impl_tuple_deserialize!(u32, bool); -// 
impl_tuple_serialize!(u32, ZBuf); -// impl_tuple_deserialize!(u32, ZBuf); -// impl_tuple_serialize!(u32, Vec); -// impl_tuple_deserialize!(u32, Vec); -// impl_tuple_serialize!(u32, String); -// impl_tuple_deserialize!(u32, String); -// impl_tuple_serialize!(u32, &[u8]); -// impl_tuple_serialize!(u64, u8); -// impl_tuple_deserialize!(u64, u8); -// impl_tuple_serialize!(u64, u16); -// impl_tuple_deserialize!(u64, u16); -// impl_tuple_serialize!(u64, u32); -// impl_tuple_deserialize!(u64, u32); -// impl_tuple_serialize!(u64, u64); -// impl_tuple_deserialize!(u64, u64); -// impl_tuple_serialize!(u64, usize); -// impl_tuple_deserialize!(u64, usize); -// impl_tuple_serialize!(u64, i8); -// impl_tuple_deserialize!(u64, i8); -// impl_tuple_serialize!(u64, i16); -// impl_tuple_deserialize!(u64, i16); -// impl_tuple_serialize!(u64, i32); -// impl_tuple_deserialize!(u64, i32); -// impl_tuple_serialize!(u64, isize); -// impl_tuple_deserialize!(u64, isize); -// impl_tuple_serialize!(u64, f32); -// impl_tuple_deserialize!(u64, f32); -// impl_tuple_serialize!(u64, f64); -// impl_tuple_deserialize!(u64, f64); -// impl_tuple_serialize!(u64, bool); -// impl_tuple_deserialize!(u64, bool); -// impl_tuple_serialize!(u64, ZBuf); -// impl_tuple_deserialize!(u64, ZBuf); -// impl_tuple_serialize!(u64, Vec); -// impl_tuple_deserialize!(u64, Vec); -// impl_tuple_serialize!(u64, String); -// impl_tuple_deserialize!(u64, String); -// impl_tuple_serialize!(u64, &[u8]); -// impl_tuple_serialize!(usize, u8); -// impl_tuple_deserialize!(usize, u8); -// impl_tuple_serialize!(usize, u16); -// impl_tuple_deserialize!(usize, u16); -// impl_tuple_serialize!(usize, u32); -// impl_tuple_deserialize!(usize, u32); -// impl_tuple_serialize!(usize, u64); -// impl_tuple_deserialize!(usize, u64); -// impl_tuple_serialize!(usize, usize); -// impl_tuple_deserialize!(usize, usize); -// impl_tuple_serialize!(usize, i8); -// impl_tuple_deserialize!(usize, i8); -// impl_tuple_serialize!(usize, i16); -// 
impl_tuple_deserialize!(usize, i16); -// impl_tuple_serialize!(usize, i32); -// impl_tuple_deserialize!(usize, i32); -// impl_tuple_serialize!(usize, isize); -// impl_tuple_deserialize!(usize, isize); -// impl_tuple_serialize!(usize, f32); -// impl_tuple_deserialize!(usize, f32); -// impl_tuple_serialize!(usize, f64); -// impl_tuple_deserialize!(usize, f64); -// impl_tuple_serialize!(usize, bool); -// impl_tuple_deserialize!(usize, bool); -// impl_tuple_serialize!(usize, ZBuf); -// impl_tuple_deserialize!(usize, ZBuf); -// impl_tuple_serialize!(usize, Vec); -// impl_tuple_deserialize!(usize, Vec); -// impl_tuple_serialize!(usize, String); -// impl_tuple_deserialize!(usize, String); -// impl_tuple_serialize!(usize, &[u8]); -// impl_tuple_serialize!(i8, u8); -// impl_tuple_deserialize!(i8, u8); -// impl_tuple_serialize!(i8, u16); -// impl_tuple_deserialize!(i8, u16); -// impl_tuple_serialize!(i8, u32); -// impl_tuple_deserialize!(i8, u32); -// impl_tuple_serialize!(i8, u64); -// impl_tuple_deserialize!(i8, u64); -// impl_tuple_serialize!(i8, usize); -// impl_tuple_deserialize!(i8, usize); -// impl_tuple_serialize!(i8, i8); -// impl_tuple_deserialize!(i8, i8); -// impl_tuple_serialize!(i8, i16); -// impl_tuple_deserialize!(i8, i16); -// impl_tuple_serialize!(i8, i32); -// impl_tuple_deserialize!(i8, i32); -// impl_tuple_serialize!(i8, isize); -// impl_tuple_deserialize!(i8, isize); -// impl_tuple_serialize!(i8, f32); -// impl_tuple_deserialize!(i8, f32); -// impl_tuple_serialize!(i8, f64); -// impl_tuple_deserialize!(i8, f64); -// impl_tuple_serialize!(i8, bool); -// impl_tuple_deserialize!(i8, bool); -// impl_tuple_serialize!(i8, ZBuf); -// impl_tuple_deserialize!(i8, ZBuf); -// impl_tuple_serialize!(i8, Vec); -// impl_tuple_deserialize!(i8, Vec); -// impl_tuple_serialize!(i8, String); -// impl_tuple_deserialize!(i8, String); -// impl_tuple_serialize!(i8, &[u8]); -// impl_tuple_serialize!(i16, u8); -// impl_tuple_deserialize!(i16, u8); -// impl_tuple_serialize!(i16, 
u16); -// impl_tuple_deserialize!(i16, u16); -// impl_tuple_serialize!(i16, u32); -// impl_tuple_deserialize!(i16, u32); -// impl_tuple_serialize!(i16, u64); -// impl_tuple_deserialize!(i16, u64); -// impl_tuple_serialize!(i16, usize); -// impl_tuple_deserialize!(i16, usize); -// impl_tuple_serialize!(i16, i8); -// impl_tuple_deserialize!(i16, i8); -// impl_tuple_serialize!(i16, i16); -// impl_tuple_deserialize!(i16, i16); -// impl_tuple_serialize!(i16, i32); -// impl_tuple_deserialize!(i16, i32); -// impl_tuple_serialize!(i16, isize); -// impl_tuple_deserialize!(i16, isize); -// impl_tuple_serialize!(i16, f32); -// impl_tuple_deserialize!(i16, f32); -// impl_tuple_serialize!(i16, f64); -// impl_tuple_deserialize!(i16, f64); -// impl_tuple_serialize!(i16, bool); -// impl_tuple_deserialize!(i16, bool); -// impl_tuple_serialize!(i16, ZBuf); -// impl_tuple_deserialize!(i16, ZBuf); -// impl_tuple_serialize!(i16, Vec); -// impl_tuple_deserialize!(i16, Vec); -// impl_tuple_serialize!(i16, String); -// impl_tuple_deserialize!(i16, String); -// impl_tuple_serialize!(i16, &[u8]); -// impl_tuple_serialize!(i32, u8); -// impl_tuple_deserialize!(i32, u8); -// impl_tuple_serialize!(i32, u16); -// impl_tuple_deserialize!(i32, u16); -// impl_tuple_serialize!(i32, u32); -// impl_tuple_deserialize!(i32, u32); -// impl_tuple_serialize!(i32, u64); -// impl_tuple_deserialize!(i32, u64); -// impl_tuple_serialize!(i32, usize); -// impl_tuple_deserialize!(i32, usize); -// impl_tuple_serialize!(i32, i8); -// impl_tuple_deserialize!(i32, i8); -// impl_tuple_serialize!(i32, i16); -// impl_tuple_deserialize!(i32, i16); -// impl_tuple_serialize!(i32, i32); -// impl_tuple_deserialize!(i32, i32); -// impl_tuple_serialize!(i32, isize); -// impl_tuple_deserialize!(i32, isize); -// impl_tuple_serialize!(i32, f32); -// impl_tuple_deserialize!(i32, f32); -// impl_tuple_serialize!(i32, f64); -// impl_tuple_deserialize!(i32, f64); -// impl_tuple_serialize!(i32, bool); -// impl_tuple_deserialize!(i32, 
bool); -// impl_tuple_serialize!(i32, ZBuf); -// impl_tuple_deserialize!(i32, ZBuf); -// impl_tuple_serialize!(i32, Vec); -// impl_tuple_deserialize!(i32, Vec); -// impl_tuple_serialize!(i32, String); -// impl_tuple_deserialize!(i32, String); -// impl_tuple_serialize!(i32, &[u8]); -// impl_tuple_serialize!(isize, u8); -// impl_tuple_deserialize!(isize, u8); -// impl_tuple_serialize!(isize, u16); -// impl_tuple_deserialize!(isize, u16); -// impl_tuple_serialize!(isize, u32); -// impl_tuple_deserialize!(isize, u32); -// impl_tuple_serialize!(isize, u64); -// impl_tuple_deserialize!(isize, u64); -// impl_tuple_serialize!(isize, usize); -// impl_tuple_deserialize!(isize, usize); -// impl_tuple_serialize!(isize, i8); -// impl_tuple_deserialize!(isize, i8); -// impl_tuple_serialize!(isize, i16); -// impl_tuple_deserialize!(isize, i16); -// impl_tuple_serialize!(isize, i32); -// impl_tuple_deserialize!(isize, i32); -// impl_tuple_serialize!(isize, isize); -// impl_tuple_deserialize!(isize, isize); -// impl_tuple_serialize!(isize, f32); -// impl_tuple_deserialize!(isize, f32); -// impl_tuple_serialize!(isize, f64); -// impl_tuple_deserialize!(isize, f64); -// impl_tuple_serialize!(isize, bool); -// impl_tuple_deserialize!(isize, bool); -// impl_tuple_serialize!(isize, ZBuf); -// impl_tuple_deserialize!(isize, ZBuf); -// impl_tuple_serialize!(isize, Vec); -// impl_tuple_deserialize!(isize, Vec); -// impl_tuple_serialize!(isize, String); -// impl_tuple_deserialize!(isize, String); -// impl_tuple_serialize!(isize, &[u8]); -// impl_tuple_serialize!(f32, u8); -// impl_tuple_deserialize!(f32, u8); -// impl_tuple_serialize!(f32, u16); -// impl_tuple_deserialize!(f32, u16); -// impl_tuple_serialize!(f32, u32); -// impl_tuple_deserialize!(f32, u32); -// impl_tuple_serialize!(f32, u64); -// impl_tuple_deserialize!(f32, u64); -// impl_tuple_serialize!(f32, usize); -// impl_tuple_deserialize!(f32, usize); -// impl_tuple_serialize!(f32, i8); -// impl_tuple_deserialize!(f32, i8); -// 
impl_tuple_serialize!(f32, i16); -// impl_tuple_deserialize!(f32, i16); -// impl_tuple_serialize!(f32, i32); -// impl_tuple_deserialize!(f32, i32); -// impl_tuple_serialize!(f32, isize); -// impl_tuple_deserialize!(f32, isize); -// impl_tuple_serialize!(f32, f32); -// impl_tuple_deserialize!(f32, f32); -// impl_tuple_serialize!(f32, f64); -// impl_tuple_deserialize!(f32, f64); -// impl_tuple_serialize!(f32, bool); -// impl_tuple_deserialize!(f32, bool); -// impl_tuple_serialize!(f32, ZBuf); -// impl_tuple_deserialize!(f32, ZBuf); -// impl_tuple_serialize!(f32, Vec); -// impl_tuple_deserialize!(f32, Vec); -// impl_tuple_serialize!(f32, String); -// impl_tuple_deserialize!(f32, String); -// impl_tuple_serialize!(f32, &[u8]); -// impl_tuple_serialize!(f64, u8); -// impl_tuple_deserialize!(f64, u8); -// impl_tuple_serialize!(f64, u16); -// impl_tuple_deserialize!(f64, u16); -// impl_tuple_serialize!(f64, u32); -// impl_tuple_deserialize!(f64, u32); -// impl_tuple_serialize!(f64, u64); -// impl_tuple_deserialize!(f64, u64); -// impl_tuple_serialize!(f64, usize); -// impl_tuple_deserialize!(f64, usize); -// impl_tuple_serialize!(f64, i8); -// impl_tuple_deserialize!(f64, i8); -// impl_tuple_serialize!(f64, i16); -// impl_tuple_deserialize!(f64, i16); -// impl_tuple_serialize!(f64, i32); -// impl_tuple_deserialize!(f64, i32); -// impl_tuple_serialize!(f64, isize); -// impl_tuple_deserialize!(f64, isize); -// impl_tuple_serialize!(f64, f32); -// impl_tuple_deserialize!(f64, f32); -// impl_tuple_serialize!(f64, f64); -// impl_tuple_deserialize!(f64, f64); -// impl_tuple_serialize!(f64, bool); -// impl_tuple_deserialize!(f64, bool); -// impl_tuple_serialize!(f64, ZBuf); -// impl_tuple_deserialize!(f64, ZBuf); -// impl_tuple_serialize!(f64, Vec); -// impl_tuple_deserialize!(f64, Vec); -// impl_tuple_serialize!(f64, String); -// impl_tuple_deserialize!(f64, String); -// impl_tuple_serialize!(f64, &[u8]); -// impl_tuple_serialize!(bool, u8); -// impl_tuple_deserialize!(bool, 
u8); -// impl_tuple_serialize!(bool, u16); -// impl_tuple_deserialize!(bool, u16); -// impl_tuple_serialize!(bool, u32); -// impl_tuple_deserialize!(bool, u32); -// impl_tuple_serialize!(bool, u64); -// impl_tuple_deserialize!(bool, u64); -// impl_tuple_serialize!(bool, usize); -// impl_tuple_deserialize!(bool, usize); -// impl_tuple_serialize!(bool, i8); -// impl_tuple_deserialize!(bool, i8); -// impl_tuple_serialize!(bool, i16); -// impl_tuple_deserialize!(bool, i16); -// impl_tuple_serialize!(bool, i32); -// impl_tuple_deserialize!(bool, i32); -// impl_tuple_serialize!(bool, isize); -// impl_tuple_deserialize!(bool, isize); -// impl_tuple_serialize!(bool, f32); -// impl_tuple_deserialize!(bool, f32); -// impl_tuple_serialize!(bool, f64); -// impl_tuple_deserialize!(bool, f64); -// impl_tuple_serialize!(bool, bool); -// impl_tuple_deserialize!(bool, bool); -// impl_tuple_serialize!(bool, ZBuf); -// impl_tuple_deserialize!(bool, ZBuf); -// impl_tuple_serialize!(bool, Vec); -// impl_tuple_deserialize!(bool, Vec); -// impl_tuple_serialize!(bool, String); -// impl_tuple_deserialize!(bool, String); -// impl_tuple_serialize!(bool, &[u8]); -// impl_tuple_serialize!(ZBuf, u8); -// impl_tuple_deserialize!(ZBuf, u8); -// impl_tuple_serialize!(ZBuf, u16); -// impl_tuple_deserialize!(ZBuf, u16); -// impl_tuple_serialize!(ZBuf, u32); -// impl_tuple_deserialize!(ZBuf, u32); -// impl_tuple_serialize!(ZBuf, u64); -// impl_tuple_deserialize!(ZBuf, u64); -// impl_tuple_serialize!(ZBuf, usize); -// impl_tuple_deserialize!(ZBuf, usize); -// impl_tuple_serialize!(ZBuf, i8); -// impl_tuple_deserialize!(ZBuf, i8); -// impl_tuple_serialize!(ZBuf, i16); -// impl_tuple_deserialize!(ZBuf, i16); -// impl_tuple_serialize!(ZBuf, i32); -// impl_tuple_deserialize!(ZBuf, i32); -// impl_tuple_serialize!(ZBuf, isize); -// impl_tuple_deserialize!(ZBuf, isize); -// impl_tuple_serialize!(ZBuf, f32); -// impl_tuple_deserialize!(ZBuf, f32); -// impl_tuple_serialize!(ZBuf, f64); -// 
impl_tuple_deserialize!(ZBuf, f64); -// impl_tuple_serialize!(ZBuf, bool); -// impl_tuple_deserialize!(ZBuf, bool); -// impl_tuple_serialize!(ZBuf, ZBuf); -// impl_tuple_deserialize!(ZBuf, ZBuf); -// impl_tuple_serialize!(ZBuf, Vec); -// impl_tuple_deserialize!(ZBuf, Vec); -// impl_tuple_serialize!(ZBuf, String); -// impl_tuple_deserialize!(ZBuf, String); -// impl_tuple_serialize!(ZBuf, &[u8]); -// impl_tuple_serialize!(Vec, u8); -// impl_tuple_deserialize!(Vec, u8); -// impl_tuple_serialize!(Vec, u16); -// impl_tuple_deserialize!(Vec, u16); -// impl_tuple_serialize!(Vec, u32); -// impl_tuple_deserialize!(Vec, u32); -// impl_tuple_serialize!(Vec, u64); -// impl_tuple_deserialize!(Vec, u64); -// impl_tuple_serialize!(Vec, usize); -// impl_tuple_deserialize!(Vec, usize); -// impl_tuple_serialize!(Vec, i8); -// impl_tuple_deserialize!(Vec, i8); -// impl_tuple_serialize!(Vec, i16); -// impl_tuple_deserialize!(Vec, i16); -// impl_tuple_serialize!(Vec, i32); -// impl_tuple_deserialize!(Vec, i32); -// impl_tuple_serialize!(Vec, isize); -// impl_tuple_deserialize!(Vec, isize); -// impl_tuple_serialize!(Vec, f32); -// impl_tuple_deserialize!(Vec, f32); -// impl_tuple_serialize!(Vec, f64); -// impl_tuple_deserialize!(Vec, f64); -// impl_tuple_serialize!(Vec, bool); -// impl_tuple_deserialize!(Vec, bool); -// impl_tuple_serialize!(Vec, ZBuf); -// impl_tuple_deserialize!(Vec, ZBuf); -// impl_tuple_serialize!(Vec, Vec); -// impl_tuple_deserialize!(Vec, Vec); -// impl_tuple_serialize!(Vec, String); -// impl_tuple_deserialize!(Vec, String); -// impl_tuple_serialize!(Vec, &[u8]); -// impl_tuple_serialize!(String, u8); -// impl_tuple_deserialize!(String, u8); -// impl_tuple_serialize!(String, u16); -// impl_tuple_deserialize!(String, u16); -// impl_tuple_serialize!(String, u32); -// impl_tuple_deserialize!(String, u32); -// impl_tuple_serialize!(String, u64); -// impl_tuple_deserialize!(String, u64); -// impl_tuple_serialize!(String, usize); -// impl_tuple_deserialize!(String, 
usize); -// impl_tuple_serialize!(String, i8); -// impl_tuple_deserialize!(String, i8); -// impl_tuple_serialize!(String, i16); -// impl_tuple_deserialize!(String, i16); -// impl_tuple_serialize!(String, i32); -// impl_tuple_deserialize!(String, i32); -// impl_tuple_serialize!(String, isize); -// impl_tuple_deserialize!(String, isize); -// impl_tuple_serialize!(String, f32); -// impl_tuple_deserialize!(String, f32); -// impl_tuple_serialize!(String, f64); -// impl_tuple_deserialize!(String, f64); -// impl_tuple_serialize!(String, bool); -// impl_tuple_deserialize!(String, bool); -// impl_tuple_serialize!(String, ZBuf); -// impl_tuple_deserialize!(String, ZBuf); -// impl_tuple_serialize!(String, Vec); -// impl_tuple_deserialize!(String, Vec); -// impl_tuple_serialize!(String, String); -// impl_tuple_deserialize!(String, String); -// impl_tuple_serialize!(String, &[u8]); -// impl_tuple_serialize!(&[u8], u8); -// impl_tuple_serialize!(&[u8], u16); -// impl_tuple_serialize!(&[u8], u32); -// impl_tuple_serialize!(&[u8], u64); -// impl_tuple_serialize!(&[u8], usize); -// impl_tuple_serialize!(&[u8], i8); -// impl_tuple_serialize!(&[u8], i16); -// impl_tuple_serialize!(&[u8], i32); -// impl_tuple_serialize!(&[u8], isize); -// impl_tuple_serialize!(&[u8], f32); -// impl_tuple_serialize!(&[u8], f64); -// impl_tuple_serialize!(&[u8], bool); -// impl_tuple_serialize!(&[u8], ZBuf); -// impl_tuple_serialize!(&[u8], Vec); -// impl_tuple_serialize!(&[u8], String); -// impl_tuple_serialize!(&[u8], &[u8]); -// impl_iterator_serialize!(u8); -// impl_iterator_serialize!(u16); -// impl_iterator_serialize!(u32); -// impl_iterator_serialize!(u64); -// impl_iterator_serialize!(usize); -// impl_iterator_serialize!(i8); -// impl_iterator_serialize!(i16); -// impl_iterator_serialize!(i32); -// impl_iterator_serialize!(isize); -// impl_iterator_serialize!(f32); -// impl_iterator_serialize!(f64); -// impl_iterator_serialize!(bool); -// impl_iterator_serialize!(ZBuf); -// 
impl_iterator_serialize!(Vec); -// impl_iterator_serialize!(String); -// impl_iterator_serialize!(&[u8]); + let v: [usize; 5] = [0, 1, 2, 3, 4]; + let p = Payload::from_iter(v.iter()); + for (i, t) in p.iter::().enumerate() { + assert_eq!(i, t); + } + + use std::collections::HashMap; + let mut hm: HashMap = HashMap::new(); + hm.insert(0, 0); + hm.insert(1, 1); + let p = Payload::from_iter(hm.iter()); + // for (i, (k, v)) in p.iter::<(usize, usize)>().enumerate() { + // assert_eq!(i, k); + // assert_eq!(i, v); + // } + } +} From a25676b4c468c408c31f74d2a896be315a1d7f1a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 8 Apr 2024 20:08:36 +0200 Subject: [PATCH 178/598] Improve payload serde --- zenoh/src/payload.rs | 272 ++++++++++++++++++++++++++++++++----------- 1 file changed, 202 insertions(+), 70 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index ed8c1b98c3..3c4709a6ae 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -211,6 +211,20 @@ impl From for Payload { } } +impl Serialize<&ZBuf> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &ZBuf) -> Self::Output { + Payload::new(t.clone()) + } +} + +impl From<&ZBuf> for Payload { + fn from(t: &ZBuf) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; @@ -246,6 +260,20 @@ impl From> for Payload { } } +impl Serialize<&Vec> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &Vec) -> Self::Output { + Payload::new(t.clone()) + } +} + +impl From<&Vec> for Payload { + fn from(t: &Vec) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; @@ -296,6 +324,20 @@ impl From> for Payload { } } +impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl From<&Cow<'_, [u8]>> for Payload { + fn from(t: &Cow<'_, [u8]>) -> Self { + 
ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; @@ -325,6 +367,20 @@ impl From for Payload { } } +impl Serialize<&String> for ZSerde { + type Output = Payload; + + fn serialize(self, s: &String) -> Self::Output { + Payload::new(s.clone().into_bytes()) + } +} + +impl From<&String> for Payload { + fn from(t: &String) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; @@ -380,6 +436,20 @@ impl From> for Payload { } } +impl<'a> Serialize<&Cow<'a, str>> for ZSerde { + type Output = Payload; + + fn serialize(self, s: &Cow<'a, str>) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl From<&Cow<'_, str>> for Payload { + fn from(t: &Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = FromUtf8Error; @@ -437,20 +507,6 @@ macro_rules! impl_int { } } - impl Serialize<&mut $t> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &mut $t) -> Self::Output { - ZSerde.serialize(*t) - } - } - - impl From<&mut $t> for Payload { - fn from(t: &mut $t) -> Self { - ZSerde.serialize(t) - } - } - impl<'a> Deserialize<'a, $t> for ZSerde { type Error = ZDeserializeError; @@ -522,6 +578,20 @@ impl From for Payload { } } +impl Serialize<&bool> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&bool> for Payload { + fn from(t: &bool) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, bool> for ZSerde { type Error = ZDeserializeError; @@ -535,6 +605,14 @@ impl Deserialize<'_, bool> for ZSerde { } } +impl TryFrom for bool { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for bool { type Error = ZDeserializeError; @@ -545,36 +623,36 @@ impl TryFrom<&Payload> for bool { // - Zenoh advanced types 
encoders/decoders // JSON -impl Serialize<&serde_json::Value> for ZSerde { +impl Serialize for ZSerde { type Output = Result; - fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_json::to_writer(payload.0.writer(), t)?; - Ok(payload) + fn serialize(self, t: serde_json::Value) -> Self::Output { + ZSerde.serialize(&t) } } -impl TryFrom<&serde_json::Value> for Payload { +impl TryFrom for Payload { type Error = serde_json::Error; - fn try_from(value: &serde_json::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(&value) } } -impl Serialize for ZSerde { +impl Serialize<&serde_json::Value> for ZSerde { type Output = Result; - fn serialize(self, t: serde_json::Value) -> Self::Output { - Self.serialize(&t) + fn serialize(self, t: &serde_json::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_json::to_writer(payload.0.writer(), t)?; + Ok(payload) } } -impl TryFrom for Payload { +impl TryFrom<&serde_json::Value> for Payload { type Error = serde_json::Error; - fn try_from(value: serde_json::Value) -> Result { + fn try_from(value: &serde_json::Value) -> Result { ZSerde.serialize(value) } } @@ -587,6 +665,14 @@ impl Deserialize<'_, serde_json::Value> for ZSerde { } } +impl TryFrom for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for serde_json::Value { type Error = serde_json::Error; @@ -596,36 +682,36 @@ impl TryFrom<&Payload> for serde_json::Value { } // Yaml -impl Serialize<&serde_yaml::Value> for ZSerde { +impl Serialize for ZSerde { type Output = Result; - fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.0.writer(), t)?; - Ok(payload) + fn serialize(self, t: serde_yaml::Value) -> Self::Output { + Self.serialize(&t) } } -impl 
TryFrom<&serde_yaml::Value> for Payload { +impl TryFrom for Payload { type Error = serde_yaml::Error; - fn try_from(value: &serde_yaml::Value) -> Result { + fn try_from(value: serde_yaml::Value) -> Result { ZSerde.serialize(value) } } -impl Serialize for ZSerde { +impl Serialize<&serde_yaml::Value> for ZSerde { type Output = Result; - fn serialize(self, t: serde_yaml::Value) -> Self::Output { - Self.serialize(&t) + fn serialize(self, t: &serde_yaml::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_yaml::to_writer(payload.0.writer(), t)?; + Ok(payload) } } -impl TryFrom for Payload { +impl TryFrom<&serde_yaml::Value> for Payload { type Error = serde_yaml::Error; - fn try_from(value: serde_yaml::Value) -> Result { + fn try_from(value: &serde_yaml::Value) -> Result { ZSerde.serialize(value) } } @@ -638,6 +724,14 @@ impl Deserialize<'_, serde_yaml::Value> for ZSerde { } } +impl TryFrom for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for serde_yaml::Value { type Error = serde_yaml::Error; @@ -647,36 +741,36 @@ impl TryFrom<&Payload> for serde_yaml::Value { } // CBOR -impl Serialize<&serde_cbor::Value> for ZSerde { +impl Serialize for ZSerde { type Output = Result; - fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_cbor::to_writer(payload.0.writer(), t)?; - Ok(payload) + fn serialize(self, t: serde_cbor::Value) -> Self::Output { + Self.serialize(&t) } } -impl TryFrom<&serde_cbor::Value> for Payload { +impl TryFrom for Payload { type Error = serde_cbor::Error; - fn try_from(value: &serde_cbor::Value) -> Result { + fn try_from(value: serde_cbor::Value) -> Result { ZSerde.serialize(value) } } -impl Serialize for ZSerde { +impl Serialize<&serde_cbor::Value> for ZSerde { type Output = Result; - fn serialize(self, t: serde_cbor::Value) -> Self::Output { - Self.serialize(&t) + fn 
serialize(self, t: &serde_cbor::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_cbor::to_writer(payload.0.writer(), t)?; + Ok(payload) } } -impl TryFrom for Payload { +impl TryFrom<&serde_cbor::Value> for Payload { type Error = serde_cbor::Error; - fn try_from(value: serde_cbor::Value) -> Result { + fn try_from(value: &serde_cbor::Value) -> Result { ZSerde.serialize(value) } } @@ -689,6 +783,14 @@ impl Deserialize<'_, serde_cbor::Value> for ZSerde { } } +impl TryFrom for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for serde_cbor::Value { type Error = serde_cbor::Error; @@ -698,6 +800,22 @@ impl TryFrom<&Payload> for serde_cbor::Value { } // Pickle +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_pickle::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl TryFrom for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize<&serde_pickle::Value> for ZSerde { type Output = Result; @@ -720,27 +838,19 @@ impl TryFrom<&serde_pickle::Value> for Payload { } } -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_pickle::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for Payload { +impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) + fn deserialize(self, v: &Payload) -> Result { + serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } -impl Deserialize<'_, serde_pickle::Value> for ZSerde { +impl TryFrom for serde_pickle::Value { type Error = serde_pickle::Error; - fn deserialize(self, v: &Payload) -> Result { - serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) + fn 
try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) } } @@ -761,6 +871,12 @@ impl Serialize> for ZSerde { Payload::new(t) } } +#[cfg(feature = "shared-memory")] +impl From> for Payload { + fn from(t: Arc) -> Self { + ZSerde.serialize(t) + } +} #[cfg(feature = "shared-memory")] impl Serialize> for ZSerde { @@ -772,6 +888,13 @@ impl Serialize> for ZSerde { } } +#[cfg(feature = "shared-memory")] +impl From> for Payload { + fn from(t: Box) -> Self { + ZSerde.serialize(t) + } +} + #[cfg(feature = "shared-memory")] impl Serialize for ZSerde { type Output = Payload; @@ -781,6 +904,13 @@ impl Serialize for ZSerde { } } +#[cfg(feature = "shared-memory")] +impl From for Payload { + fn from(t: SharedMemoryBuf) -> Self { + ZSerde.serialize(t) + } +} + // Tuple impl Serialize<(A, B)> for ZSerde where @@ -859,7 +989,7 @@ where } } -// For convenience to always convert a Value the examples +// For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { String(String), @@ -1000,7 +1130,9 @@ mod tests { // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; + println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.iter()); + println!("Deerialize:\t{:?}", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); } @@ -1009,10 +1141,10 @@ mod tests { let mut hm: HashMap = HashMap::new(); hm.insert(0, 0); hm.insert(1, 1); + println!("Serialize:\t{:?}", hm); let p = Payload::from_iter(hm.iter()); - // for (i, (k, v)) in p.iter::<(usize, usize)>().enumerate() { - // assert_eq!(i, k); - // assert_eq!(i, v); - // } + println!("Deerialize:\t{:?}", p); + let o: HashMap = HashMap::from_iter(p.iter()); + assert_eq!(hm, o); } } From d0246076a3260e40a0df4fc0d0c2357126a37793 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 09:38:42 +0200 Subject: [PATCH 179/598] [u8;N] payload support. from_reader functionality. 
--- zenoh/src/payload.rs | 88 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 85 insertions(+), 3 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 3c4709a6ae..a65843dcaf 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,6 +14,7 @@ //! Payload primitives. use crate::buffers::ZBuf; +use std::io::Read; use std::marker::PhantomData; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, @@ -79,6 +80,16 @@ impl Payload { PayloadReader(self.0.reader()) } + /// Build a [`Payload`] from a [`Reader`]. This operation copies data from the reader. + pub fn from_reader(mut reader: R) -> Result + where + R: std::io::Read, + { + let mut buf: Vec = vec![]; + reader.read_to_end(&mut buf)?; + Ok(Payload::new(buf)) + } + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. pub fn iter<'a, T>(&'a self) -> PayloadIterator<'a, T> where @@ -91,10 +102,7 @@ impl Payload { _t: PhantomData::, } } -} -/// Provide some facilities specific to the Rust API to encode/decode a [`Value`] with an `Serialize`. -impl Payload { /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust @@ -125,6 +133,8 @@ impl Payload { } /// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +#[repr(transparent)] +#[derive(Debug)] pub struct PayloadReader<'a>(ZBufReader<'a>); impl std::io::Read for PayloadReader<'_> { @@ -135,6 +145,8 @@ impl std::io::Read for PayloadReader<'_> { /// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. /// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. 
+#[repr(transparent)] +#[derive(Debug)] pub struct PayloadIterator<'a, T> where ZSerde: Deserialize<'a, T>, @@ -245,6 +257,65 @@ impl From<&Payload> for ZBuf { } } +// [u8; N] +impl Serialize<[u8; N]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: [u8; N]) -> Self::Output { + Payload::new(t) + } +} + +impl From<[u8; N]> for Payload { + fn from(t: [u8; N]) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&[u8; N]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &[u8; N]) -> Self::Output { + Payload::new(*t) + } +} + +impl From<&[u8; N]> for Payload { + fn from(t: &[u8; N]) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize<'_, [u8; N]> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { + if v.0.len() != N { + return Err(ZDeserializeError); + } + let mut dst = [0u8; N]; + let mut reader = v.reader(); + reader.read_exact(&mut dst).map_err(|_| ZDeserializeError)?; + Ok(dst) + } +} + +impl TryFrom for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&Payload> for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + // Vec impl Serialize> for ZSerde { type Output = Payload; @@ -1137,6 +1208,17 @@ mod tests { assert_eq!(i, t); } + let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; + println!("Serialize:\t{:?}", v); + let p = Payload::from_iter(v.drain(..)); + println!("Deerialize:\t{:?}", p); + let mut iter = p.iter::<[u8; 4]>(); + assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); + assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); + assert_eq!(iter.next().unwrap(), [8, 9, 10, 11]); + assert_eq!(iter.next().unwrap(), [12, 13, 14, 15]); + assert!(iter.next().is_none()); + use std::collections::HashMap; let mut hm: HashMap = HashMap::new(); hm.insert(0, 0); From 
2a6bade7cc2d932cee30c18f97848c74511097cd Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 09:45:05 +0200 Subject: [PATCH 180/598] Improve payload test --- zenoh/src/payload.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index a65843dcaf..4899dd97e6 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1224,9 +1224,9 @@ mod tests { hm.insert(0, 0); hm.insert(1, 1); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.iter()); + let p = Payload::from_iter(hm.drain()); println!("Deerialize:\t{:?}", p); - let o: HashMap = HashMap::from_iter(p.iter()); + let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); } } From 6793a6b8741fc055633c28e568c2fc8237abbeea Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 12:20:09 +0200 Subject: [PATCH 181/598] Payload zserde improvement --- zenoh/src/payload.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 4899dd97e6..59ad8b79b5 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -540,7 +540,7 @@ impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { // - Integers impl macro_rules! impl_int { - ($t:ty, $encoding:expr) => { + ($t:ty) => { impl Serialize<$t> for ZSerde { type Output = Payload; @@ -615,22 +615,22 @@ macro_rules! 
impl_int { } // Zenoh unsigned integers -impl_int!(u8, ZSerde::ZENOH_UINT); -impl_int!(u16, ZSerde::ZENOH_UINT); -impl_int!(u32, ZSerde::ZENOH_UINT); -impl_int!(u64, ZSerde::ZENOH_UINT); -impl_int!(usize, ZSerde::ZENOH_UINT); +impl_int!(u8); +impl_int!(u16); +impl_int!(u32); +impl_int!(u64); +impl_int!(usize); // Zenoh signed integers -impl_int!(i8, ZSerde::ZENOH_INT); -impl_int!(i16, ZSerde::ZENOH_INT); -impl_int!(i32, ZSerde::ZENOH_INT); -impl_int!(i64, ZSerde::ZENOH_INT); -impl_int!(isize, ZSerde::ZENOH_INT); +impl_int!(i8); +impl_int!(i16); +impl_int!(i32); +impl_int!(i64); +impl_int!(isize); // Zenoh floats -impl_int!(f32, ZSerde::ZENOH_FLOAT); -impl_int!(f64, ZSerde::ZENOH_FLOAT); +impl_int!(f32); +impl_int!(f64); // Zenoh bool impl Serialize for ZSerde { @@ -1203,7 +1203,7 @@ mod tests { let v: [usize; 5] = [0, 1, 2, 3, 4]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.iter()); - println!("Deerialize:\t{:?}", p); + println!("Deserialize:\t{:?}", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); } @@ -1211,7 +1211,7 @@ mod tests { let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.drain(..)); - println!("Deerialize:\t{:?}", p); + println!("Deserialize:\t{:?}", p); let mut iter = p.iter::<[u8; 4]>(); assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); @@ -1225,7 +1225,7 @@ mod tests { hm.insert(1, 1); println!("Serialize:\t{:?}", hm); let p = Payload::from_iter(hm.drain()); - println!("Deerialize:\t{:?}", p); + println!("Deserialize:\t{:?}", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); } From 7240f0169556a66fb4abca47dcfbcce736a01e53 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 12:22:08 +0200 Subject: [PATCH 182/598] Fix encoding doc: suffix to schema --- commons/zenoh-codec/src/core/encoding.rs | 4 ++-- 
commons/zenoh-protocol/src/core/encoding.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index cfbe0084ba..c8033cdd5f 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -62,13 +62,13 @@ where fn read(self, reader: &mut R) -> Result { let zodec = Zenoh080Bounded::::new(); let id: u32 = zodec.read(&mut *reader)?; - let (id, has_suffix) = ( + let (id, has_schema) = ( (id >> 1) as EncodingId, imsg::has_flag(id as u8, flag::S as u8), ); let mut schema = None; - if has_suffix { + if has_schema { let zodec = Zenoh080Bounded::::new(); schema = Some(zodec.read(&mut *reader)?); } diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index 9b9aa5bf2f..70afdbf143 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -18,8 +18,8 @@ pub type EncodingId = u16; /// [`Encoding`] is a metadata that indicates how the data payload should be interpreted. /// For wire-efficiency and extensibility purposes, Zenoh defines an [`Encoding`] as -/// composed of an unsigned integer prefix and a string suffix. The actual meaning of the -/// prefix and suffix are out-of-scope of the protocol definition. Therefore, Zenoh does not +/// composed of an unsigned integer prefix and a bytes schema. The actual meaning of the +/// prefix and schema are out-of-scope of the protocol definition. Therefore, Zenoh does not /// impose any encoding mapping and users are free to use any mapping they like. /// Nevertheless, it is worth highlighting that Zenoh still provides a default mapping as part /// of the API as per user convenience. That mapping has no impact on the Zenoh protocol definition. 
@@ -40,7 +40,7 @@ pub struct Encoding { /// +---------------+ /// ``` pub mod flag { - pub const S: u32 = 1; // 0x01 Suffix if S==1 then suffix is present + pub const S: u32 = 1; // 0x01 Suffix if S==1 then schema is present } impl Encoding { From ccf48c38167dc2e6b4a6cb42974dc70e5c1b98de Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 14:09:47 +0200 Subject: [PATCH 183/598] buffer reader exported --- zenoh/src/lib.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index f071360567..cb25ab8efc 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -106,8 +106,8 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ); // Expose some functions directly to root `zenoh::`` namespace for convenience -pub use crate::api::session::open; pub use crate::api::scouting::scout; +pub use crate::api::session::open; pub mod prelude; @@ -127,7 +127,10 @@ pub mod core { /// reading and writing data. pub mod buffers { pub use zenoh_buffers::buffer::SplitBuffer; - pub use zenoh_buffers::{ZBuf, ZSlice}; + pub use zenoh_buffers::reader::HasReader; + pub use zenoh_buffers::reader::Reader; + pub use zenoh_buffers::ZBufReader; + pub use zenoh_buffers::{ZBuf, ZSlice, ZSliceBuffer}; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. 
@@ -251,6 +254,7 @@ pub mod subscriber { /// Publishing primitives pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; + pub use crate::api::publication::MatchingListener; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] From 989509c447df98771abcfaea786e203e885db5b5 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 17:11:28 +0200 Subject: [PATCH 184/598] impl Seek for ZBufReader --- commons/zenoh-buffers/src/zbuf.rs | 144 +++++++++++++++++++++++++++--- zenoh/src/payload.rs | 7 +- 2 files changed, 134 insertions(+), 17 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index fd86f454af..6fded06ae7 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -21,6 +21,8 @@ use crate::{ }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; +#[cfg(feature = "std")] +use std::io; use zenoh_collections::SingleOrVec; fn get_mut_unchecked(arc: &mut Arc) -> &mut T { @@ -270,7 +272,7 @@ impl<'a> Reader for ZBufReader<'a> { } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { - let len = self.read(into)?; + let len = Reader::read(self, into)?; if len.get() == into.len() { Ok(()) } else { @@ -317,7 +319,7 @@ impl<'a> Reader for ZBufReader<'a> { match (slice.len() - self.cursor.byte).cmp(&len) { cmp::Ordering::Less => { let mut buffer = crate::vec::uninit(len); - self.read_exact(&mut buffer)?; + Reader::read_exact(self, &mut buffer)?; Ok(buffer.into()) } cmp::Ordering::Equal => { @@ -388,18 +390,58 @@ impl<'a> SiphonableReader for ZBufReader<'a> { } #[cfg(feature = "std")] -impl<'a> std::io::Read for ZBufReader<'a> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { +impl<'a> io::Read for ZBufReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> io::Result { match ::read(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => 
Err(std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, + Err(_) => Err(io::Error::new( + io::ErrorKind::UnexpectedEof, "UnexpectedEof", )), } } } +#[cfg(feature = "std")] +impl<'a> io::Seek for ZBufReader<'a> { + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + // Compute the index + let len = self.inner.len(); + let index = match pos { + io::SeekFrom::Start(pos) => pos.try_into().unwrap_or(i64::MAX), + io::SeekFrom::End(pos) => { + pos + i64::try_from(len) + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? + } + io::SeekFrom::Current(pos) => { + pos + i64::try_from(len - self.remaining()) + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? + } + }; + + let index = usize::try_from(index) + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? + .min(len); + + // Seek the position + let mut left = index; + let mut pos = ZBufPos { slice: 0, byte: 0 }; + while let Some(slice) = self.inner.slices.get(pos.slice) { + let len = slice.len(); + if len >= left { + pos.byte = left; + self.cursor = pos; + return Ok(index as u64); + } else { + left -= len; + } + pos.slice += 1; + } + + Err(io::ErrorKind::UnexpectedEof.into()) + } +} + // ZSlice iterator pub struct ZBufSliceIterator<'a, 'b> { reader: &'a mut ZBufReader<'b>, @@ -614,18 +656,18 @@ impl BacktrackableWriter for ZBufWriter<'_> { } #[cfg(feature = "std")] -impl<'a> std::io::Write for ZBufWriter<'a> { - fn write(&mut self, buf: &[u8]) -> std::io::Result { +impl<'a> io::Write for ZBufWriter<'a> { + fn write(&mut self, buf: &[u8]) -> io::Result { match ::write(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, + Err(_) => Err(io::Error::new( + io::ErrorKind::UnexpectedEof, "UnexpectedEof", )), } } - fn flush(&mut self) -> std::io::Result<()> { + fn flush(&mut self) -> io::Result<()> { Ok(()) } } @@ -668,4 +710,84 @@ mod tests { assert_eq!(zbuf1, zbuf2); } + + #[cfg(feature = "std")] + #[test] + fn zbuf_seek() { + 
use crate::reader::HasReader; + use std::io::{Seek, SeekFrom}; + + use super::{ZBuf, ZSlice}; + + let slice: ZSlice = [0u8, 1, 2, 3, 4, 5, 6, 7].to_vec().into(); + + let mut zbuf = ZBuf::empty(); + zbuf.push_zslice(slice.subslice(0, 1).unwrap()); + zbuf.push_zslice(slice.subslice(1, 4).unwrap()); + zbuf.push_zslice(slice.subslice(4, 8).unwrap()); + + let mut reader = zbuf.reader(); + + let index = reader.seek(SeekFrom::Start(0)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(4)).unwrap(); + assert_eq!(index, 4); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(8)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(u64::MAX)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::End(0)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::End(-4)).unwrap(); + assert_eq!(index, 4); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::End(-8)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + reader.seek(SeekFrom::End(i64::MIN)).unwrap_err(); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(0)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + reader.seek(SeekFrom::Current(-1)).unwrap_err(); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(2)).unwrap(); + assert_eq!(index, 2); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(2)).unwrap(); + assert_eq!(index, 4); + assert_eq!(index, reader.stream_position().unwrap()); + + let 
index = reader.seek(SeekFrom::Current(-2)).unwrap(); + assert_eq!(index, 2); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(-2)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(i64::MAX)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(-1)).unwrap(); + assert_eq!(index, 7); + assert_eq!(index, reader.stream_position().unwrap()); + } } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 59ad8b79b5..4de36f2d94 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -22,7 +22,7 @@ use std::{ use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, - reader::{HasReader, Reader}, + reader::HasReader, writer::HasWriter, ZBufReader, ZSlice, }; @@ -171,11 +171,6 @@ where let t = ZSerde.deserialize(&kpld).ok()?; Some(t) } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.reader.remaining(); - (remaining, Some(remaining)) - } } impl FromIterator for Payload From 140526b6881ef3ddcc7536ccf879cd86692e36bf Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 17:14:18 +0200 Subject: [PATCH 185/598] impl Seek for PayloadReader --- zenoh/src/payload.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 4de36f2d94..7e42b4564a 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,7 +14,6 @@ //! Payload primitives. 
use crate::buffers::ZBuf; -use std::io::Read; use std::marker::PhantomData; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, @@ -143,6 +142,12 @@ impl std::io::Read for PayloadReader<'_> { } } +impl std::io::Seek for PayloadReader<'_> { + fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { + std::io::Seek::seek(&mut self.0, pos) + } +} + /// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. /// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. #[repr(transparent)] @@ -285,6 +290,8 @@ impl Deserialize<'_, [u8; N]> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { + use std::io::Read; + if v.0.len() != N { return Err(ZDeserializeError); } From 2dceb52b4db864d4616fd0ca1d271d2e423752cc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 17:40:18 +0200 Subject: [PATCH 186/598] Fix tests --- zenoh/src/payload.rs | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 7e42b4564a..a63d19d4a9 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -19,6 +19,7 @@ use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; +use zenoh_buffers::ZBufWriter; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, @@ -148,6 +149,21 @@ impl std::io::Seek for PayloadReader<'_> { } } +/// A writer that implements [`std::io::Write`] trait to write into a [`Payload`]. 
+#[repr(transparent)] +#[derive(Debug)] +pub struct PayloadWriter<'a>(ZBufWriter<'a>); + +impl std::io::Write for PayloadWriter<'_> { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + std::io::Write::write(&mut self.0, buf) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } +} + /// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. /// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. #[repr(transparent)] @@ -1205,7 +1221,7 @@ mod tests { let v: [usize; 5] = [0, 1, 2, 3, 4]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.iter()); - println!("Deserialize:\t{:?}", p); + println!("Deserialize:\t{:?}\n", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); } @@ -1213,7 +1229,7 @@ mod tests { let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.drain(..)); - println!("Deserialize:\t{:?}", p); + println!("Deserialize:\t{:?}\n", p); let mut iter = p.iter::<[u8; 4]>(); assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); @@ -1226,8 +1242,8 @@ mod tests { hm.insert(0, 0); hm.insert(1, 1); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.drain()); - println!("Deserialize:\t{:?}", p); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); } From 0294dc953da588ec190364474bc0dce60dcf5363 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 17:41:21 +0200 Subject: [PATCH 187/598] zserde exported --- zenoh/src/api/encoding.rs | 3 +++ zenoh/src/lib.rs | 1 + 2 files changed, 4 insertions(+) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 7518671eed..aba01e01b4 100644 --- a/zenoh/src/api/encoding.rs +++ 
b/zenoh/src/api/encoding.rs @@ -848,3 +848,6 @@ impl EncodingMapping for Box { impl EncodingMapping for SharedMemoryBuf { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } + +pub struct EncodingBuilder(Encoding); + diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index cb25ab8efc..5d35f35c8a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -231,6 +231,7 @@ pub mod payload { pub use crate::api::payload::PayloadReader; pub use crate::api::payload::Serialize; pub use crate::api::payload::StringOrBase64; + pub use crate::api::payload::ZSerde; } /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries From c2c6217bcb894fe7d5319249c3b46f2f5230d998 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 18:07:06 +0200 Subject: [PATCH 188/598] Add SHM support for deserializer --- Cargo.lock | 1 + examples/Cargo.toml | 7 +++- examples/examples/z_sub.rs | 1 - examples/examples/z_sub_shm.rs | 66 ++++++++++++++++++++++++++++++++++ zenoh/src/payload.rs | 23 ++++++++++++ 5 files changed, 96 insertions(+), 2 deletions(-) create mode 100644 examples/examples/z_sub_shm.rs diff --git a/Cargo.lock b/Cargo.lock index 3f74af9ed1..a9d327a978 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4612,6 +4612,7 @@ dependencies = [ "zenoh", "zenoh-collections", "zenoh-ext", + "zenoh-shm", ] [[package]] diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fc1db17fe8..fb9c4c481d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -27,7 +27,7 @@ readme = "README.md" publish = false [features] -shared-memory = ["zenoh/shared-memory"] +shared-memory = ["zenoh-shm","zenoh/shared-memory"] unstable = ["zenoh/unstable"] transport_unixpipe = ["zenoh/transport_unixpipe"] @@ -52,6 +52,7 @@ log = { workspace = true } zenoh = { workspace = true } zenoh-collections = { workspace = true } zenoh-ext = { workspace = true } +zenoh-shm = { workspace = true, optional = true } [dev-dependencies] rand = { workspace = true, features = 
["default"] } @@ -96,6 +97,10 @@ required-features = ["shared-memory"] name = "z_sub" path = "examples/z_sub.rs" +[[example]] +name = "z_sub_shm" +path = "examples/z_sub_shm.rs" + [[example]] name = "z_pull" path = "examples/z_pull.rs" diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index fbce562c2e..299f0c8f49 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -32,7 +32,6 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs new file mode 100644 index 0000000000..630876f287 --- /dev/null +++ b/examples/examples/z_sub_shm.rs @@ -0,0 +1,66 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use zenoh::config::Config; +use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; +use zenoh_shm::SharedMemoryBuf; + +#[tokio::main] +async fn main() { + // Initiate logging + env_logger::init(); + + let (mut config, key_expr) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).res().await.unwrap(); + + println!("Declaring Subscriber on '{}'...", &key_expr); + let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(sample) = subscriber.recv_async().await { + match sample.payload().deserialize::() { + Ok(payload) => println!( + ">> [Subscriber] Received {} ('{}': '{:02x?}')", + sample.kind(), + sample.key_expr().as_str(), + payload.as_slice() + ), + Err(e) => { + println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + } + } + } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct SubArgs { + #[arg(short, long, default_value = "demo/example/**")] + /// The Key Expression to subscribe to. + key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = SubArgs::parse(); + (args.common.into(), args.key) +} diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index a63d19d4a9..b05cf868a8 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1000,6 +1000,29 @@ impl From for Payload { } } +#[cfg(feature = "shared-memory")] +impl Deserialize<'_, SharedMemoryBuf> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result { + for zs in v.0.zslices() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.clone()); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(feature = "shared-memory")] +impl TryFrom for SharedMemoryBuf { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + // Tuple impl Serialize<(A, B)> for ZSerde where From e4ee3069e4cff58a79e983d2bdb9c357a5975177 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 18:30:30 +0200 Subject: [PATCH 189/598] Fix SharedMemoryBuf deserialize --- 
zenoh/src/payload.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index b05cf868a8..1cb9fae783 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1005,7 +1005,9 @@ impl Deserialize<'_, SharedMemoryBuf> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result { - for zs in v.0.zslices() { + // A SharedMemoryBuf is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { if let Some(shmb) = zs.downcast_ref::() { return Ok(shmb.clone()); } From 8e87318dd343c3931ce8df024c48fbcf681cd7be Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 18:30:35 +0200 Subject: [PATCH 190/598] export keyexpr SetIntersectionLevel --- zenoh/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 5d35f35c8a..2d71f81c97 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -174,6 +174,7 @@ pub mod key_expr { pub use crate::api::key_expr::KeyExpr; pub use zenoh_keyexpr::keyexpr; pub use zenoh_keyexpr::OwnedKeyExpr; + pub use zenoh_keyexpr::SetIntersectionLevel; pub use zenoh_macros::{kedefine, keformat, kewrite}; // keyexpr format macro support pub mod format { From 0a38e277ec4c5be9714efe2b277e2e108428ba91 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 18:32:57 +0200 Subject: [PATCH 191/598] liveliness token export --- zenoh/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2d71f81c97..247d1d68a0 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -305,6 +305,7 @@ pub mod scouting { pub mod liveliness { pub use crate::api::liveliness::Liveliness; pub use crate::api::liveliness::LivelinessSubscriberBuilder; + pub use crate::api::liveliness::LivelinessToken; } /// Timestamp support From cf861e1ecaa75930488e72b8288027828d1eadb4 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 
18:36:10 +0200 Subject: [PATCH 192/598] Fix clippy examples --- examples/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fb9c4c481d..4a4a4fef3e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,6 +100,7 @@ path = "examples/z_sub.rs" [[example]] name = "z_sub_shm" path = "examples/z_sub_shm.rs" +required-features = ["shared-memory"] [[example]] name = "z_pull" From 28e23ab3c2713c2b65e331a7d432c0c2856c63b9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 19:42:25 +0200 Subject: [PATCH 193/598] Add writer method to payload --- zenoh/src/payload.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 1cb9fae783..f8af7e182a 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,9 +14,9 @@ //! Payload primitives. use crate::buffers::ZBuf; -use std::marker::PhantomData; use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, + string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::ZBufWriter; @@ -57,7 +57,7 @@ impl Payload { Self(ZBuf::empty()) } - /// Create a [`Payload`] from any type `T` that can implements [`Into`]. + /// Create a [`Payload`] from any type `T` that implements [`Into`]. pub fn new(t: T) -> Self where T: Into, @@ -80,7 +80,7 @@ impl Payload { PayloadReader(self.0.reader()) } - /// Build a [`Payload`] from a [`Reader`]. This operation copies data from the reader. + /// Build a [`Payload`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. pub fn from_reader(mut reader: R) -> Result where R: std::io::Read, @@ -103,6 +103,11 @@ impl Payload { } } + /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. 
+ pub fn writer(&mut self) -> PayloadWriter<'_> { + PayloadWriter(self.0.writer()) + } + /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust @@ -733,7 +738,7 @@ impl Serialize<&serde_json::Value> for ZSerde { fn serialize(self, t: &serde_json::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_json::to_writer(payload.0.writer(), t)?; + serde_json::to_writer(payload.writer(), t)?; Ok(payload) } } From 042964e11e8a6aa423611f669b33e5426bdfd7bc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 11:49:09 +0200 Subject: [PATCH 194/598] Add ZSlice payload support --- commons/zenoh-buffers/src/zbuf.rs | 41 ++++- commons/zenoh-buffers/src/zslice.rs | 4 + .../zenoh-collections/src/single_or_vec.rs | 3 + zenoh/src/payload.rs | 174 +++++++++++++++--- 4 files changed, 187 insertions(+), 35 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 6fded06ae7..cfface650a 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -17,7 +17,7 @@ use crate::{ buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, + ZSlice, ZSliceBuffer, }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; @@ -60,6 +60,21 @@ impl ZBuf { } } + pub fn to_zslice(&self) -> ZSlice { + let mut slices = self.zslices(); + match self.slices.len() { + 0 => ZSlice::empty(), + // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. 
+ 1 => unsafe { slices.next().unwrap_unchecked().clone() }, + _ => slices + .fold(Vec::new(), |mut acc, it| { + acc.extend(it.as_slice()); + acc + }) + .into(), + } + } + pub fn splice>(&mut self, erased: Range, replacement: &[u8]) { let start = match erased.start_bound() { core::ops::Bound::Included(n) => *n, @@ -201,15 +216,31 @@ impl PartialEq for ZBuf { } // From impls +impl From for ZBuf { + fn from(t: ZSlice) -> Self { + let mut zbuf = ZBuf::empty(); + zbuf.push_zslice(t); + zbuf + } +} + +impl From> for ZBuf +where + T: ZSliceBuffer + 'static, +{ + fn from(t: Arc) -> Self { + let zslice: ZSlice = t.into(); + Self::from(zslice) + } +} + impl From for ZBuf where - T: Into, + T: ZSliceBuffer + 'static, { fn from(t: T) -> Self { - let mut zbuf = ZBuf::empty(); let zslice: ZSlice = t.into(); - zbuf.push_zslice(zslice); - zbuf + Self::from(zslice) } } diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index c15cbc6828..05c77cac7d 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -114,6 +114,10 @@ impl ZSlice { } } + pub fn empty() -> Self { + unsafe { ZSlice::new_unchecked(Arc::new([]), 0, 0) } + } + /// # Safety /// This function does not verify wether the `start` and `end` indexes are within the buffer boundaries. /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. 
diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index ceb43e4025..ed82bf49af 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -182,14 +182,17 @@ impl SingleOrVec { self.vectorize().insert(at, value); } } + enum DrainInner<'a, T> { Vec(alloc::vec::Drain<'a, T>), Single(&'a mut SingleOrVecInner), Done, } + pub struct Drain<'a, T> { inner: DrainInner<'a, T>, } + impl<'a, T> Iterator for Drain<'a, T> { type Item = T; diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index f8af7e182a..1b91757329 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -91,11 +91,11 @@ impl Payload { } /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter<'a, T>(&'a self) -> PayloadIterator<'a, T> + pub fn iter(&self) -> PayloadIterator<'_, T> where - T: TryFrom, - ZSerde: Deserialize<'a, T>, - >::Error: Debug, + T: for<'b> TryFrom<&'b Payload>, + for<'b> ZSerde: Deserialize<'b, T>, + for<'b> >::Error: Debug, { PayloadIterator { reader: self.0.reader(), @@ -126,14 +126,23 @@ impl Payload { } /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. - /// See [encode](Value::encode) for an example. pub fn deserialize<'a, T>(&'a self) -> ZResult where ZSerde: Deserialize<'a, T>, >::Error: Debug, { - let t: T = ZSerde.deserialize(self).map_err(|e| zerror!("{:?}", e))?; - Ok(t) + ZSerde + .deserialize(self) + .map_err(|e| zerror!("{:?}", e).into()) + } + + /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. 
+ pub fn into<'a, T>(&'a self) -> T + where + ZSerde: Deserialize<'a, T, Error = Infallible>, + >::Error: Debug, + { + ZSerde.deserialize(self).unwrap_infallible() } } @@ -181,10 +190,10 @@ where _t: PhantomData, } -impl<'a, T> Iterator for PayloadIterator<'a, T> +impl Iterator for PayloadIterator<'_, T> where - ZSerde: for<'b> Deserialize<'b, T>, - >::Error: Debug, + for<'a> ZSerde: Deserialize<'a, T>, + for<'a> >::Error: Debug, { type Item = T; @@ -278,6 +287,55 @@ impl From<&Payload> for ZBuf { } } +// ZSlice +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, t: ZSlice) -> Self::Output { + Payload::new(t) + } +} + +impl From for Payload { + fn from(t: ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&ZSlice> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &ZSlice) -> Self::Output { + Payload::new(t.clone()) + } +} + +impl From<&ZSlice> for Payload { + fn from(t: &ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize<'_, ZSlice> for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result { + Ok(v.0.to_zslice()) + } +} + +impl From for ZSlice { + fn from(value: Payload) -> Self { + ZBuf::from(value).to_zslice() + } +} + +impl From<&Payload> for ZSlice { + fn from(value: &Payload) -> Self { + ZSerde.deserialize(value).unwrap_infallible() + } +} + // [u8; N] impl Serialize<[u8; N]> for ZSerde { type Output = Payload; @@ -515,7 +573,6 @@ impl From<&str> for Payload { } } -// Cow impl<'a> Serialize> for ZSerde { type Output = Payload; @@ -1069,16 +1126,16 @@ where } } -impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde +impl Deserialize<'_, (A, B)> for ZSerde where - A: TryFrom, - >::Error: Debug, - B: TryFrom, - >::Error: Debug, + for<'a> A: TryFrom<&'a Payload>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b Payload>, + for<'b> >::Error: Debug, { type Error = ZError; - fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { + fn 
deserialize(self, payload: &Payload) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = payload.0.reader(); @@ -1088,18 +1145,18 @@ where let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let bpld = Payload::new(bbuf); - let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; + let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: TryFrom, - >::Error: Debug, - B: TryFrom, - >::Error: Debug, + A: for<'a> TryFrom<&'a Payload>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b Payload>, + for<'b> >::Error: Debug, { type Error = ZError; @@ -1108,6 +1165,20 @@ where } } +impl TryFrom<&Payload> for (A, B) +where + for<'a> A: TryFrom<&'a Payload>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b Payload>, + for<'b> >::Error: Debug, +{ + type Error = ZError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + // For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -1142,12 +1213,9 @@ impl std::fmt::Display for StringOrBase64 { impl From<&Payload> for StringOrBase64 { fn from(v: &Payload) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::>() { - Ok(s) => StringOrBase64::String(s.into_owned()), - Err(_) => { - let cow: Cow<'_, [u8]> = Cow::from(v); - StringOrBase64::Base64(b64_std_engine.encode(cow)) - } + match v.deserialize::() { + Ok(s) => StringOrBase64::String(s), + Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.into::>())), } } } @@ -1157,7 +1225,7 @@ mod tests { fn serializer() { use super::Payload; use rand::Rng; - use zenoh_buffers::ZBuf; + use zenoh_buffers::{ZBuf, ZSlice}; const NUM: usize = 1_000; @@ -1276,5 +1344,51 @@ mod tests { 
println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); + + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + assert_eq!(hm, o); + + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZSlice::from(vec![0u8; 8])); + hm.insert(1, ZSlice::from(vec![1u8; 16])); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZBuf::from(vec![0u8; 8])); + hm.insert(1, ZBuf::from(vec![1u8; 16])); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); + assert_eq!(hm, o); + + use std::borrow::Cow; + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + assert_eq!(hm, o); } } From abded105583f165d939ac9b24174e6a65b11abbb Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 11:58:42 +0200 Subject: [PATCH 195/598] Improve payload --- zenoh/src/payload.rs | 13 ++++++++++++- 1 file changed, 12 
insertions(+), 1 deletion(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 1b91757329..aed0d15834 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1225,6 +1225,7 @@ mod tests { fn serializer() { use super::Payload; use rand::Rng; + use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; const NUM: usize = 1_000; @@ -1302,10 +1303,21 @@ mod tests { serialize_deserialize!(String, ""); serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!( + Cow, + Cow::from(String::from("abcdefghijklmnopqrstuvwxyz")) + ); + // Vec serialize_deserialize!(Vec, vec![0u8; 0]); serialize_deserialize!(Vec, vec![0u8; 64]); + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + // ZBuf serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); @@ -1381,7 +1393,6 @@ mod tests { let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); assert_eq!(hm, o); - use std::borrow::Cow; let mut hm: HashMap> = HashMap::new(); hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); From adf422d89945f1958ff2460f0816c684fa2cfe37 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 10 Apr 2024 12:30:41 +0200 Subject: [PATCH 196/598] allowed build zenoh without unstable feature set (#910) --- zenoh/src/publication.rs | 1 + zenoh/src/query.rs | 2 ++ zenoh/src/queryable.rs | 12 ++++++++++-- zenoh/src/sample/builder.rs | 1 + zenoh/src/sample/mod.rs | 8 ++++---- zenoh/src/session.rs | 6 +++++- zenoh/src/subscriber.rs | 3 --- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index c176ad32e0..4f31c73a24 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -157,6 +157,7 @@ impl

ValueBuilderTrait for PublicationBuilder { } } +#[zenoh_macros::unstable] impl SampleBuilderTrait for PublicationBuilder { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index cb1116130d..3a380bd1c9 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -133,6 +133,7 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) source_info: SourceInfo, } +#[zenoh_macros::unstable] impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { @@ -430,6 +431,7 @@ where self.value, #[cfg(feature = "unstable")] self.attachment, + #[cfg(feature = "unstable")] self.source_info, callback, ) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6fbb4e9090..0ad3a36c07 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,12 +18,15 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::{QoSBuilder, SourceInfo}; +use crate::sample::builder::SampleBuilder; +use crate::sample::QoSBuilder; +#[cfg(feature = "unstable")] +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::builder::SampleBuilder, sample::Attachment}; +use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -155,7 +158,9 @@ impl Query { encoding: Encoding::default(), }, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -193,7 +198,9 @@ impl Query { qos: response::ext::QoSType::RESPONSE.into(), kind: ReplyBuilderDelete, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ 
-298,6 +305,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index fca55edd09..bad35024ef 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -163,6 +163,7 @@ impl TimestampBuilderTrait for SampleBuilder { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 6e457578a3..0ef8462d2a 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -22,9 +22,9 @@ use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; -use zenoh_protocol::{core::CongestionControl, zenoh}; pub mod builder; @@ -178,12 +178,12 @@ impl SourceInfo { } #[zenoh_macros::unstable] -impl From for Option { - fn from(source_info: SourceInfo) -> Option { +impl From for Option { + fn from(source_info: SourceInfo) -> Option { if source_info.is_empty() { None } else { - Some(zenoh::put::ext::SourceInfoType { + Some(zenoh_protocol::zenoh::put::ext::SourceInfoType { id: source_info.source_id.unwrap_or_default(), sn: source_info.source_sn.unwrap_or_default() as u32, }) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index f694eb6420..181976dcb0 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -61,6 +61,8 @@ use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; +#[cfg(feature = "unstable")] +use 
zenoh_protocol::network::ext; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::reply::ReplyBody; @@ -77,7 +79,6 @@ use zenoh_protocol::{ subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, request::{self, ext::TargetType, Request}, Mapping, Push, Response, ResponseFinal, }, @@ -1687,7 +1688,10 @@ impl Session { payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, parameters: selector.parameters().to_string(), + #[cfg(feature = "unstable")] ext_sinfo: source.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..47d41ebb1f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -202,9 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] From 7d9d57c2b2ef023a0c5887efb092250f2ff2ef44 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 13:03:54 +0200 Subject: [PATCH 197/598] Fix tests --- zenoh/src/payload.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index aed0d15834..eac4f58e7c 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,6 +14,7 @@ //! Payload primitives. 
use crate::buffers::ZBuf; +use std::str::Utf8Error; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, string::FromUtf8Error, sync::Arc, @@ -494,7 +495,7 @@ impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { - Ok(Cow::from(v)) + Ok(v.0.contiguous()) } } @@ -602,16 +603,19 @@ impl From<&Cow<'_, str>> for Payload { } impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { - type Error = FromUtf8Error; + type Error = Utf8Error; - fn deserialize(self, v: &Payload) -> Result, Self::Error> { - let v: String = Self.deserialize(v)?; - Ok(Cow::Owned(v)) + fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + let v: Cow<[u8]> = Self.deserialize(v).unwrap_infallible(); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. 
+ Ok(unsafe { core::mem::transmute(v) }) } } impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { - type Error = FromUtf8Error; + type Error = Utf8Error; fn try_from(value: &'a Payload) -> Result { ZSerde.deserialize(value) @@ -1301,14 +1305,11 @@ mod tests { // String serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + serialize_deserialize!(String, String::from("abcdef")); // Cow serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!( - Cow, - Cow::from(String::from("abcdefghijklmnopqrstuvwxyz")) - ); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); // Vec serialize_deserialize!(Vec, vec![0u8; 0]); From 27063b6fd2f15be36aa3988c37cf1cbb46933c40 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 15:10:17 +0200 Subject: [PATCH 198/598] Integrating #918 --- commons/zenoh-buffers/src/lib.rs | 12 ++ commons/zenoh-buffers/src/zbuf.rs | 213 +++++++++++++++--------------- 2 files changed, 117 insertions(+), 108 deletions(-) diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index eae7f1715c..117fb412b7 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -199,6 +199,18 @@ pub mod reader { fn rewind(&mut self, mark: Self::Mark) -> bool; } + pub trait AdvanceableReader: Reader { + fn skip(&mut self, offset: usize) -> Result<(), DidntRead>; + fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead>; + fn advance(&mut self, offset: isize) -> Result<(), DidntRead> { + if offset > 0 { + self.skip(offset as usize) + } else { + self.backtrack((-offset) as usize) + } + } + } + #[derive(Debug, Clone, Copy)] pub struct DidntSiphon; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index cfface650a..f3621049b0 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -15,7 +15,10 @@ use crate::ZSliceKind; use crate::{ buffer::{Buffer, 
SplitBuffer}, - reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, + reader::{ + AdvanceableReader, BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, + SiphonableReader, + }, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, ZSliceBuffer, }; @@ -433,43 +436,74 @@ impl<'a> io::Read for ZBufReader<'a> { } } -#[cfg(feature = "std")] -impl<'a> io::Seek for ZBufReader<'a> { - fn seek(&mut self, pos: io::SeekFrom) -> io::Result { - // Compute the index - let len = self.inner.len(); - let index = match pos { - io::SeekFrom::Start(pos) => pos.try_into().unwrap_or(i64::MAX), - io::SeekFrom::End(pos) => { - pos + i64::try_from(len) - .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? - } - io::SeekFrom::Current(pos) => { - pos + i64::try_from(len - self.remaining()) - .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? +impl<'a> AdvanceableReader for ZBufReader<'a> { + fn skip(&mut self, offset: usize) -> Result<(), DidntRead> { + let mut remaining_offset = offset; + while remaining_offset > 0 { + let s = self.inner.slices.get(self.cursor.slice).ok_or(DidntRead)?; + let remains_in_current_slice = s.len() - self.cursor.byte; + let advance = remaining_offset.min(remains_in_current_slice); + remaining_offset -= advance; + self.cursor.byte += advance; + if self.cursor.byte == s.len() { + self.cursor.slice += 1; + self.cursor.byte = 0; } - }; + } + Ok(()) + } - let index = usize::try_from(index) - .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? 
- .min(len); - - // Seek the position - let mut left = index; - let mut pos = ZBufPos { slice: 0, byte: 0 }; - while let Some(slice) = self.inner.slices.get(pos.slice) { - let len = slice.len(); - if len >= left { - pos.byte = left; - self.cursor = pos; - return Ok(index as u64); - } else { - left -= len; + fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead> { + let mut remaining_offset = offset; + while remaining_offset > 0 { + let backtrack = remaining_offset.min(self.cursor.byte); + remaining_offset -= backtrack; + self.cursor.byte -= backtrack; + if self.cursor.byte == 0 { + if self.cursor.slice == 0 { + break; + } + self.cursor.slice -= 1; + self.cursor.byte = self + .inner + .slices + .get(self.cursor.slice) + .ok_or(DidntRead)? + .len(); } - pos.slice += 1; } + if remaining_offset == 0 { + Ok(()) + } else { + Err(DidntRead) + } + } +} - Err(io::ErrorKind::UnexpectedEof.into()) +#[cfg(feature = "std")] +impl<'a> io::Seek for ZBufReader<'a> { + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + let current_pos = self + .inner + .slices() + .take(self.cursor.slice) + .fold(0, |acc, s| acc + s.len()) + + self.cursor.byte; + let current_pos = i64::try_from(current_pos) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}", e)))?; + + let offset = match pos { + std::io::SeekFrom::Start(s) => i64::try_from(s).unwrap_or(i64::MAX) - current_pos, + std::io::SeekFrom::Current(s) => s, + std::io::SeekFrom::End(s) => self.inner.len() as i64 + s - current_pos, + }; + match self.advance(offset as isize) { + Ok(()) => Ok((offset + current_pos) as u64), + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "InvalidInput", + )), + } } } @@ -745,80 +779,43 @@ mod tests { #[cfg(feature = "std")] #[test] fn zbuf_seek() { - use crate::reader::HasReader; - use std::io::{Seek, SeekFrom}; - - use super::{ZBuf, ZSlice}; - - let slice: ZSlice = [0u8, 1, 2, 3, 4, 5, 6, 7].to_vec().into(); - - let mut zbuf = ZBuf::empty(); - 
zbuf.push_zslice(slice.subslice(0, 1).unwrap()); - zbuf.push_zslice(slice.subslice(1, 4).unwrap()); - zbuf.push_zslice(slice.subslice(4, 8).unwrap()); - - let mut reader = zbuf.reader(); - - let index = reader.seek(SeekFrom::Start(0)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(4)).unwrap(); - assert_eq!(index, 4); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(8)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(u64::MAX)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::End(0)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::End(-4)).unwrap(); - assert_eq!(index, 4); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::End(-8)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - reader.seek(SeekFrom::End(i64::MIN)).unwrap_err(); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(0)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - reader.seek(SeekFrom::Current(-1)).unwrap_err(); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(2)).unwrap(); - assert_eq!(index, 2); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(2)).unwrap(); - assert_eq!(index, 4); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(-2)).unwrap(); - assert_eq!(index, 2); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(-2)).unwrap(); - 
assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(i64::MAX)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(-1)).unwrap(); - assert_eq!(index, 7); - assert_eq!(index, reader.stream_position().unwrap()); + use super::{HasReader, ZBuf}; + use crate::reader::Reader; + use std::io::Seek; + + let mut buf = ZBuf::empty(); + buf.push_zslice([0u8, 1u8, 2u8, 3u8].into()); + buf.push_zslice([4u8, 5u8, 6u8, 7u8, 8u8].into()); + buf.push_zslice([9u8, 10u8, 11u8, 12u8, 13u8, 14u8].into()); + let mut reader = buf.reader(); + + assert_eq!(reader.stream_position().unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + assert_eq!(reader.seek(std::io::SeekFrom::Current(6)).unwrap(), 7); + assert_eq!(reader.read_u8().unwrap(), 7); + assert_eq!(reader.seek(std::io::SeekFrom::Current(-5)).unwrap(), 3); + assert_eq!(reader.read_u8().unwrap(), 3); + assert_eq!(reader.seek(std::io::SeekFrom::Current(10)).unwrap(), 14); + assert_eq!(reader.read_u8().unwrap(), 14); + reader.seek(std::io::SeekFrom::Current(100)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::Start(0)).unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + assert_eq!(reader.seek(std::io::SeekFrom::Start(12)).unwrap(), 12); + assert_eq!(reader.read_u8().unwrap(), 12); + assert_eq!(reader.seek(std::io::SeekFrom::Start(15)).unwrap(), 15); + reader.read_u8().unwrap_err(); + reader.seek(std::io::SeekFrom::Start(100)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::End(0)).unwrap(), 15); + reader.read_u8().unwrap_err(); + assert_eq!(reader.seek(std::io::SeekFrom::End(-5)).unwrap(), 10); + assert_eq!(reader.read_u8().unwrap(), 10); + assert_eq!(reader.seek(std::io::SeekFrom::End(-15)).unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + reader.seek(std::io::SeekFrom::End(-20)).unwrap_err(); + + 
assert_eq!(reader.seek(std::io::SeekFrom::Start(10)).unwrap(), 10); + reader.seek(std::io::SeekFrom::Current(-100)).unwrap_err(); } } From a1c2a024e6343222eb110595ee166804b24d0397 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 15:12:25 +0200 Subject: [PATCH 199/598] Fix ZBur io::Read impl --- commons/zenoh-buffers/src/zbuf.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index f3621049b0..4a655ce36a 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -428,10 +428,7 @@ impl<'a> io::Read for ZBufReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { match ::read(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - "UnexpectedEof", - )), + Err(_) => Ok(0), } } } From 834be851a79b4787ad4ad3639c28dd86e66c8c12 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 10 Apr 2024 18:00:39 +0200 Subject: [PATCH 200/598] compilation fixes --- zenoh-ext/src/publication_cache.rs | 4 +- zenoh/src/net/tests/tables.rs | 69 ------------------------------ 2 files changed, 2 insertions(+), 71 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 608a051d05..9f2b645da9 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -181,9 +181,9 @@ impl<'a> PublicationCache<'a> { sample = sub_recv.recv_async() => { if let Ok(sample) = sample { let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { - prefix.join(&sample.key_expr).unwrap().into() + prefix.join(&sample.key_expr()).unwrap().into() } else { - sample.key_expr.clone() + sample.key_expr().clone() }; if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index f5a4b24e2a..35db2a7ac4 100644 --- a/zenoh/src/net/tests/tables.rs +++ 
b/zenoh/src/net/tests/tables.rs @@ -234,75 +234,6 @@ fn multisub_test() { tables::close_face(&tables, &face0); } -#[test] -fn multisub_test() { - let config = Config::default(); - let router = Router::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - &config, - ) - .unwrap(); - let tables = router.tables.clone(); - - let primitives = Arc::new(DummyPrimitives {}); - let face0 = Arc::downgrade(&router.new_primitives(primitives).state); - assert!(face0.upgrade().is_some()); - - // -------------- - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, - }; - declare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 0, - &"sub".into(), - &sub_info, - NodeId::default(), - ); - let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") - .map(|res| Arc::downgrade(&res)); - assert!(optres.is_some()); - let res = optres.unwrap(); - assert!(res.upgrade().is_some()); - - declare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 1, - &"sub".into(), - &sub_info, - NodeId::default(), - ); - assert!(res.upgrade().is_some()); - - undeclare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 0, - &WireExpr::empty(), - NodeId::default(), - ); - assert!(res.upgrade().is_some()); - - undeclare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 1, - &WireExpr::empty(), - NodeId::default(), - ); - assert!(res.upgrade().is_none()); - - tables::close_face(&tables, &face0); -} - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn clean_test() { let config = Config::default(); From 9888b6b4685b7f6a9f3667ed9b5b8acf7c7dd488 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 11 Apr 2024 12:41:25 +0200 Subject: [PATCH 201/598] compilation fixes --- zenoh-ext/src/publication_cache.rs | 4 ++-- 
zenoh/src/api/queryable.rs | 11 ----------- zenoh/src/api/sample.rs | 6 ------ zenoh/src/api/scouting.rs | 1 - zenoh/src/api/session.rs | 10 ++-------- zenoh/src/lib.rs | 3 +++ zenoh/src/net/routing/hat/linkstate_peer/network.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 2 +- zenoh/tests/session.rs | 6 +++--- 9 files changed, 12 insertions(+), 33 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 7687593b7d..8b9cc261df 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -17,7 +17,7 @@ use std::future::Ready; use std::time::Duration; use zenoh::core::Error; use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh::internal::ResolveFuture; +use zenoh::internal::{ResolveFuture, TerminatableTask}; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::queryable::{Query, Queryable}; use zenoh::runtime::ZRuntime; @@ -173,7 +173,7 @@ impl<'a> PublicationCache<'a> { let token = TerminatableTask::create_cancellation_token(); let token2 = token.clone(); let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Application, + ZRuntime::Application, async move { let mut cache: HashMap> = HashMap::with_capacity(resources_limit.unwrap_or(32)); diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3607a2aa0a..8cd6292e3d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -27,17 +27,6 @@ use super::{ }; use crate::net::primitives::Primitives; use std::{fmt, future::Ready, ops::Deref, sync::Arc}; -use crate::prelude::*; -use crate::sample::builder::SampleBuilder; -use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; -use crate::Id; -use crate::SessionRef; -use crate::Undeclarable; -#[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; -use std::fmt; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, 
SyncResolve}; use zenoh_protocol::{ diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 46ecf7cc16..f2ff96fb04 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -30,12 +30,6 @@ use zenoh_protocol::{ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::ext::QoSType; - -pub mod builder; pub type SourceSn = u64; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 3e213539c3..058ab82058 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -13,7 +13,6 @@ // use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; -use futures::StreamExt; use std::time::Duration; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5dce02d9e6..3f2bcb07f3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -51,8 +51,6 @@ use zenoh_config::{unwrap_or_default, Config, Notifier}; use zenoh_core::{ zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, SyncResolve, }; -use zenoh_config::unwrap_or_default; -use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ @@ -80,16 +78,12 @@ use zenoh_task::TaskController; use zenoh_util::core::AsyncResolve; #[cfg(feature = "unstable")] -use { - super::{ +use super::{ liveliness::{Liveliness, LivelinessTokenState}, publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, sample::{Attachment, SourceInfo}, - }, - zenoh_protocol::network::declare::SubscriberId, - 
zenoh_protocol::network::ext, -}; + }; zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 247d1d68a0..c0840c8829 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -223,6 +223,7 @@ pub mod value { /// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; + pub use crate::api::encoding::EncodingBuilder; } /// Payload primitives @@ -256,6 +257,7 @@ pub mod subscriber { /// Publishing primitives pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; + #[zenoh_macros::unstable] pub use crate::api::publication::MatchingListener; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; @@ -349,6 +351,7 @@ pub mod internal { pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; + pub use zenoh_task::TerminatableTask; } #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index f1c376df20..541594f0ca 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -15,7 +15,7 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; -use crate::runtime::WeakRuntime; +use crate::net::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use petgraph::visit::{VisitMap, Visitable}; use std::convert::TryInto; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 9a19dac6f6..88a86f51f4 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -14,7 +14,7 @@ use crate::net::codec::Zenoh080Routing; use 
crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::runtime::Runtime; -use crate::runtime::WeakRuntime; +use crate::net::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use std::convert::TryInto; use vec_map::VecMap; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 0a6e48d228..06d61dbd5a 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -293,11 +293,11 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) async fn zenoh_2sessions_1runtime_init() { let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await; println!("[RI][02a] Creating peer01 session from runtime 1"); - let peer01 = zenoh::init(r1.clone()).res_async().await.unwrap(); + let peer01 = zenoh::session::init(r1.clone()).res_async().await.unwrap(); println!("[RI][02b] Creating peer02 session from runtime 2"); - let peer02 = zenoh::init(r2.clone()).res_async().await.unwrap(); + let peer02 = zenoh::session::init(r2.clone()).res_async().await.unwrap(); println!("[RI][02c] Creating peer01a session from runtime 1"); - let peer01a = zenoh::init(r1.clone()).res_async().await.unwrap(); + let peer01a = zenoh::session::init(r1.clone()).res_async().await.unwrap(); println!("[RI][03c] Closing peer01a session"); std::mem::drop(peer01a); test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; From a1b50dd4ae6edb1a345acece51d5aec075f750e2 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 11 Apr 2024 13:40:23 +0200 Subject: [PATCH 202/598] Protocol attachment to payload (#923) * Attachment typedef * Fix io::Write for ZBuf and Payload * FIx doc * Add payload serializer test * OptionPayload for API ergonomicity --- commons/zenoh-buffers/src/zbuf.rs | 8 +- examples/examples/z_pub.rs | 27 +--- examples/examples/z_sub.rs | 9 +- zenoh/src/payload.rs | 106 ++++++++++++--- zenoh/src/publication.rs | 4 +- zenoh/src/query.rs | 4 +- zenoh/src/queryable.rs | 4 +- zenoh/src/sample/builder.rs | 8 +- 
zenoh/src/sample/mod.rs | 216 ++---------------------------- zenoh/tests/attachments.rs | 72 +++++----- 10 files changed, 167 insertions(+), 291 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 4a655ce36a..616dbb1b96 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -720,12 +720,12 @@ impl BacktrackableWriter for ZBufWriter<'_> { #[cfg(feature = "std")] impl<'a> io::Write for ZBufWriter<'a> { fn write(&mut self, buf: &[u8]) -> io::Result { + if buf.is_empty() { + return Ok(0); + } match ::write(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - "UnexpectedEof", - )), + Err(_) => Err(io::ErrorKind::UnexpectedEof.into()), } } diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 8cd3c4edba..68fbf02ca2 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -35,16 +35,12 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - let mut put = publisher.put(buf); - if let Some(attachment) = &attachment { - put = put.attachment(Some( - attachment - .split('&') - .map(|pair| split_once(pair, '=')) - .collect(), - )) - } - put.res().await.unwrap(); + publisher + .put(buf) + .attachment(&attachment) + .res() + .await + .unwrap(); } } @@ -65,17 +61,6 @@ struct Args { common: CommonArgs, } -fn split_once(s: &str, c: char) -> (&[u8], &[u8]) { - let s_bytes = s.as_bytes(); - match s.find(c) { - Some(index) => { - let (l, r) = s_bytes.split_at(index); - (l, &r[1..]) - } - None => (s_bytes, &[]), - } -} - fn parse_args() -> (Config, KeyExpr<'static>, String, Option) { let args = Args::parse(); (args.common.into(), args.key, args.value, args.attach) diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 299f0c8f49..1e19bbff0e 100644 --- a/examples/examples/z_sub.rs +++ 
b/examples/examples/z_sub.rs @@ -40,12 +40,19 @@ async fn main() { .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); - println!( + print!( ">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(), payload ); + if let Some(att) = sample.attachment() { + let att = att + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + print!(" ({})", att); + } + println!(); } } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index eac4f58e7c..11a6f0c360 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -91,6 +91,11 @@ impl Payload { Ok(Payload::new(buf)) } + /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. + pub fn writer(&mut self) -> PayloadWriter<'_> { + PayloadWriter(self.0.writer()) + } + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. pub fn iter(&self) -> PayloadIterator<'_, T> where @@ -104,12 +109,7 @@ impl Payload { } } - /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. - pub fn writer(&mut self) -> PayloadWriter<'_> { - PayloadWriter(self.0.writer()) - } - - /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. + /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust /// use zenoh::payload::Payload; @@ -126,7 +126,7 @@ impl Payload { ZSerde.serialize(t) } - /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize<'a, T>(&'a self) -> ZResult where ZSerde: Deserialize<'a, T>, @@ -137,7 +137,7 @@ impl Payload { .map_err(|e| zerror!("{:?}", e).into()) } - /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
pub fn into<'a, T>(&'a self) -> T where ZSerde: Deserialize<'a, T, Error = Infallible>, @@ -231,6 +231,50 @@ where } } +/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. +#[repr(transparent)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct OptionPayload(Option); + +impl From for OptionPayload +where + T: Into, +{ + fn from(value: T) -> Self { + Self(Some(value.into())) + } +} + +impl From> for OptionPayload +where + T: Into, +{ + fn from(mut value: Option) -> Self { + match value.take() { + Some(v) => Self(Some(v.into())), + None => Self(None), + } + } +} + +impl From<&Option> for OptionPayload +where + for<'a> &'a T: Into, +{ + fn from(value: &Option) -> Self { + match value.as_ref() { + Some(v) => Self(Some(v.into())), + None => Self(None), + } + } +} + +impl From for Option { + fn from(value: OptionPayload) -> Self { + value.0 + } +} + /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. /// It also supports common Rust serde values. #[derive(Clone, Copy, Debug)] @@ -858,7 +902,7 @@ impl Serialize<&serde_yaml::Value> for ZSerde { fn serialize(self, t: &serde_yaml::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.0.writer(), t)?; + serde_yaml::to_writer(payload.writer(), t)?; Ok(payload) } } @@ -1092,15 +1136,9 @@ impl TryFrom for SharedMemoryBuf { } // Tuple -impl Serialize<(A, B)> for ZSerde -where - A: Into, - B: Into, -{ - type Output = Payload; - - fn serialize(self, t: (A, B)) -> Self::Output { - let (a, b) = t; +macro_rules! 
impl_tuple { + ($t:expr) => {{ + let (a, b) = $t; let codec = Zenoh080::new(); let mut buffer: ZBuf = ZBuf::empty(); @@ -1117,6 +1155,29 @@ where } Payload::new(buffer) + }}; +} +impl Serialize<(A, B)> for ZSerde +where + A: Into, + B: Into, +{ + type Output = Payload; + + fn serialize(self, t: (A, B)) -> Self::Output { + impl_tuple!(t) + } +} + +impl Serialize<&(A, B)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, +{ + type Output = Payload; + + fn serialize(self, t: &(A, B)) -> Self::Output { + impl_tuple!(t) } } @@ -1402,5 +1463,14 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(String::from("0"), String::from("a")); + hm.insert(String::from("1"), String::from("b")); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.iter()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(String, String)>()); + assert_eq!(hm, o); } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 4f31c73a24..cdd9e810a6 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -14,6 +14,7 @@ //! Publishing primitives. use crate::net::primitives::Primitives; +use crate::payload::OptionPayload; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; @@ -167,7 +168,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 3a380bd1c9..96b2ccec38 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -14,6 +14,7 @@ //! Query primitives. 
use crate::handlers::{locked, Callback, DefaultHandler}; +use crate::payload::OptionPayload; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; @@ -144,7 +145,8 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { + fn attachment>(self, attachment: T) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0ad3a36c07..8d057c592b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,6 +17,7 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; +use crate::payload::OptionPayload; use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::QoSBuilder; @@ -308,7 +309,8 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: U) -> Self { + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index bad35024ef..79acde33a3 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -14,8 +14,9 @@ use std::marker::PhantomData; +use crate::payload::OptionPayload; #[cfg(feature = "unstable")] -use crate::sample::{Attachment, SourceInfo}; +use crate::sample::SourceInfo; use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; @@ -51,7 +52,7 @@ pub trait SampleBuilderTrait { fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self; + fn attachment>(self, attachment: T) -> 
Self; } pub trait ValueBuilderTrait { @@ -177,7 +178,8 @@ impl SampleBuilderTrait for SampleBuilder { } #[zenoh_macros::unstable] - fn attachment>>(self, attachment: U) -> Self { + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionPayload = attachment.into(); Self { sample: Sample { attachment: attachment.into(), diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 0ef8462d2a..2429f138ee 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -212,226 +212,26 @@ impl From> for SourceInfo { } mod attachment { - #[zenoh_macros::unstable] - use zenoh_buffers::{ - reader::{HasReader, Reader}, - writer::HasWriter, - ZBuf, ZBufReader, ZSlice, - }; - #[zenoh_macros::unstable] - use zenoh_codec::{RCodec, WCodec, Zenoh080}; + use crate::Payload; #[zenoh_macros::unstable] use zenoh_protocol::zenoh::ext::AttachmentType; - /// A builder for [`Attachment`] - #[zenoh_macros::unstable] - #[derive(Debug)] - pub struct AttachmentBuilder { - pub(crate) inner: Vec, - } #[zenoh_macros::unstable] - impl Default for AttachmentBuilder { - fn default() -> Self { - Self::new() - } - } - #[zenoh_macros::unstable] - impl AttachmentBuilder { - pub fn new() -> Self { - Self { inner: Vec::new() } - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. 
- pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - pub fn build(self) -> Attachment { - Attachment { - inner: self.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Attachment { - fn from(value: AttachmentBuilder) -> Self { - Attachment { - inner: value.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Option { - fn from(value: AttachmentBuilder) -> Self { - if value.inner.is_empty() { - None - } else { - Some(value.into()) - } - } - } + pub type Attachment = Payload; - #[zenoh_macros::unstable] - #[derive(Clone)] - pub struct Attachment { - pub(crate) inner: ZBuf, - } - #[zenoh_macros::unstable] - impl Default for Attachment { - fn default() -> Self { - Self::new() - } - } #[zenoh_macros::unstable] impl From for AttachmentType { fn from(this: Attachment) -> Self { - AttachmentType { buffer: this.inner } + AttachmentType { + buffer: this.into(), + } } } + #[zenoh_macros::unstable] impl From> for Attachment { fn from(this: AttachmentType) -> Self { - Attachment { inner: this.buffer } - } - } - #[zenoh_macros::unstable] - impl Attachment { - pub fn new() -> Self { - Self { - inner: ZBuf::empty(), - } - } - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - pub fn len(&self) -> usize { - self.iter().count() - } - pub fn iter(&self) -> AttachmentIterator { - self.into_iter() - } - fn _get(&self, key: &[u8]) -> Option { - self.iter() - .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) - } - pub fn get>(&self, key: &Key) -> Option { - self._get(key.as_ref()) - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. 
- /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. - /// - /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - fn _extend(&mut self, with: Self) -> &mut Self { - for slice in with.inner.zslices().cloned() { - self.inner.push_zslice(slice); - } - self - } - pub fn extend(&mut self, with: impl Into) -> &mut Self { - let with = with.into(); - self._extend(with) - } - } - #[zenoh_macros::unstable] - pub struct AttachmentIterator<'a> { - reader: ZBufReader<'a>, - } - #[zenoh_macros::unstable] - impl<'a> core::iter::IntoIterator for &'a Attachment { - type Item = (ZSlice, ZSlice); - type IntoIter = AttachmentIterator<'a>; - fn into_iter(self) -> Self::IntoIter { - AttachmentIterator { - reader: self.inner.reader(), - } - } - } - #[zenoh_macros::unstable] - impl core::fmt::Debug for Attachment { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{{")?; - for (key, value) in self { - let key = key.as_slice(); - let value = value.as_slice(); - match core::str::from_utf8(key) { - Ok(key) => write!(f, "\"{key}\": ")?, - Err(_) => { - write!(f, "0x")?; - for byte in key { - write!(f, "{byte:02X}")? - } - } - } - match core::str::from_utf8(value) { - Ok(value) => write!(f, "\"{value}\", ")?, - Err(_) => { - write!(f, "0x")?; - for byte in value { - write!(f, "{byte:02X}")? - } - write!(f, ", ")? 
- } - } - } - write!(f, "}}") - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::Iterator for AttachmentIterator<'a> { - type Item = (ZSlice, ZSlice); - fn next(&mut self) -> Option { - let key = Zenoh080.read(&mut self.reader).ok()?; - let value = Zenoh080.read(&mut self.reader).ok()?; - Some((key, value)) - } - fn size_hint(&self) -> (usize, Option) { - ( - (self.reader.remaining() != 0) as usize, - Some(self.reader.remaining() / 2), - ) - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080; - let mut buffer: Vec = Vec::new(); - let mut writer = buffer.writer(); - for (key, value) in iter { - codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures - codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures - } - Self { inner: buffer } - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { - fn from_iter>(iter: T) -> Self { - AttachmentBuilder::from_iter(iter).into() + this.buffer.into() } } } @@ -468,7 +268,7 @@ impl TryFrom for SampleKind { } #[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +pub use attachment::Attachment; /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. 
pub struct SampleFields { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 9fb99b7cc0..2a58749701 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -13,20 +13,25 @@ // #[cfg(feature = "unstable")] #[test] -fn pubsub() { +fn attachment_pubsub() { use zenoh::prelude::sync::*; + use zenoh::sample::Attachment; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { println!("{}", sample.payload().deserialize::().unwrap()); - for (k, v) in sample.attachment().unwrap() { + for (k, v) in sample.attachment().unwrap().iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) .res() .unwrap(); + let publisher = zenoh.declare_publisher("test/attachment").res().unwrap(); for i in 0..10 { let mut backer = [( @@ -36,55 +41,57 @@ fn pubsub() { for (j, backer) in backer.iter_mut().enumerate() { *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) } + zenoh .put("test/attachment", "put") - .attachment(Some( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - )) + .attachment(Attachment::from_iter(backer.iter())) .res() .unwrap(); publisher .put("publisher") - .attachment(Some( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - )) + .attachment(Attachment::from_iter(backer.iter())) .res() .unwrap(); } } + #[cfg(feature = "unstable")] #[test] -fn queries() { +fn attachment_queries() { use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_queryable("test/attachment") .callback(|query| { - println!( - "{}", - query - .value() - .map(|q| q.payload.deserialize::().unwrap()) - .unwrap_or_default() - ); - let mut attachment = Attachment::new(); - for (k, v) in 
query.attachment().unwrap() { + let s = query + .value() + .map(|q| q.payload.deserialize::().unwrap()) + .unwrap_or_default(); + println!("Query value: {}", s); + + let attachment = query.attachment().unwrap(); + println!("Query attachment: {:?}", attachment); + for (k, v) in attachment.iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)); - attachment.insert(&k, &k); } + query .reply( query.key_expr().clone(), query.value().unwrap().payload.clone(), ) - .attachment(attachment) + .attachment(Attachment::from_iter( + attachment + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(|(k, _)| (k, k)), + )) .res() .unwrap(); }) @@ -98,20 +105,19 @@ fn queries() { for (j, backer) in backer.iter_mut().enumerate() { *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) } + let get = zenoh .get("test/attachment") .payload("query") - .attachment(Some( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - )) + .attachment(Attachment::from_iter(backer.iter())) .res() .unwrap(); while let Ok(reply) = get.recv() { let response = reply.sample.as_ref().unwrap(); - for (k, v) in response.attachment().unwrap() { + for (k, v) in response.attachment().unwrap().iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() { assert_eq!(k, v) } } From be6d3b0165bacc68238079f9f318d5894ca97c45 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 11 Apr 2024 14:11:02 +0200 Subject: [PATCH 203/598] Fix ci/valgrind --- .../src/pub_sub/bin/z_pub_sub.rs | 9 +++-- .../src/queryable_get/bin/z_queryable_get.rs | 33 ++++++++++++------- zenoh/src/publication.rs | 5 ++- zenoh/src/query.rs | 5 ++- zenoh/src/queryable.rs | 9 ++--- zenoh/src/sample/builder.rs | 9 ++--- zenoh/src/sample/mod.rs | 7 ++-- 7 files changed, 44 insertions(+), 33 deletions(-) diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs 
b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index fac3437f39..454be2a869 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -38,9 +38,12 @@ async fn main() { .callback(|sample| { println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, - sample.key_expr.as_str(), - sample.value + sample.kind(), + sample.key_expr().as_str(), + sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) ); }) .res() diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 102b6a036c..ea0f16399c 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -30,13 +30,16 @@ async fn main() { .declare_queryable(&queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); - let reply = Ok(Sample::new( - queryable_key_expr.clone(), - query.value().unwrap().clone(), - )); - zenoh_runtime::ZRuntime::Application.block_in_place( - async move { query.reply(reply).res().await.unwrap(); } - ); + zenoh_runtime::ZRuntime::Application.block_in_place(async move { + query + .reply( + query.selector().key_expr, + query.value().unwrap().payload.clone(), + ) + .res() + .await + .unwrap(); + }); }) .complete(true) .res() @@ -51,7 +54,7 @@ async fn main() { println!("Sending Query '{get_selector}'..."); let replies = get_session .get(&get_selector) - .with_value(idx) + .value(idx) .target(QueryTarget::All) .res() .await @@ -60,10 +63,18 @@ async fn main() { match reply.sample { Ok(sample) => println!( ">> Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value, + sample.key_expr().as_str(), + sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + Err(err) => println!( + ">> Received (ERROR: '{}')", + err.payload + .deserialize::() + .unwrap_or_else(|e| 
format!("{}", e)) ), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), } } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index cdd9e810a6..f36e253636 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -14,16 +14,15 @@ //! Publishing primitives. use crate::net::primitives::Primitives; -use crate::payload::OptionPayload; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ handlers::{Callback, DefaultHandler, IntoHandler}, + payload::OptionPayload, + sample::Attachment, Id, }; use std::future::Ready; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 96b2ccec38..7b8da9f768 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -14,12 +14,11 @@ //! Query primitives. use crate::handlers::{locked, Callback, DefaultHandler}; -use crate::payload::OptionPayload; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; use crate::sample::QoSBuilder; use crate::Session; +#[cfg(feature = "unstable")] +use crate::{payload::OptionPayload, sample::Attachment}; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 8d057c592b..a6d87df5a4 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,17 +17,18 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; -use crate::payload::OptionPayload; use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; +use crate::{ + 
payload::OptionPayload, + query::ReplyKeyExpr, + sample::{Attachment, SourceInfo}, +}; use std::fmt; use std::future::Ready; use std::ops::Deref; diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 79acde33a3..6dc85c4046 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -11,12 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // - -use std::marker::PhantomData; - -use crate::payload::OptionPayload; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; @@ -25,6 +19,9 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Value; +#[cfg(feature = "unstable")] +use crate::{payload::OptionPayload, sample::SourceInfo}; +use std::marker::PhantomData; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 2429f138ee..f4fb1e074a 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -19,7 +19,7 @@ use crate::prelude::{KeyExpr, Value}; use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::CongestionControl; @@ -212,8 +212,9 @@ impl From> for SourceInfo { } mod attachment { - use crate::Payload; - #[zenoh_macros::unstable] + #[cfg(feature = "unstable")] + use crate::payload::Payload; + #[cfg(feature = "unstable")] use zenoh_protocol::zenoh::ext::AttachmentType; #[zenoh_macros::unstable] From d86653e4a59b7c28ea016ceabfa734c8b81596cc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 11 Apr 2024 16:09:01 +0200 Subject: [PATCH 204/598] RingChannel sync/async/blocking/non-blocking (#903) * RingChannel sync/async/blocking/non-blocking * Add comment in the examples --- Cargo.lock | 50 ++++----- 
examples/examples/z_get.rs | 4 + examples/examples/z_pull.rs | 56 +++++++--- examples/examples/z_queryable.rs | 4 + zenoh/src/handlers.rs | 186 ------------------------------- zenoh/src/handlers/callback.rs | 90 +++++++++++++++ zenoh/src/handlers/fifo.rs | 61 ++++++++++ zenoh/src/handlers/mod.rs | 52 +++++++++ zenoh/src/handlers/ring.rs | 116 +++++++++++++++++++ zenoh/src/lib.rs | 2 +- zenoh/src/liveliness.rs | 4 +- zenoh/src/publication.rs | 4 +- zenoh/src/session.rs | 10 +- zenoh/tests/handler.rs | 11 +- 14 files changed, 407 insertions(+), 243 deletions(-) delete mode 100644 zenoh/src/handlers.rs create mode 100644 zenoh/src/handlers/callback.rs create mode 100644 zenoh/src/handlers/fifo.rs create mode 100644 zenoh/src/handlers/mod.rs create mode 100644 zenoh/src/handlers/ring.rs diff --git a/Cargo.lock b/Cargo.lock index 09e598d878..66c6c4f2c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -1103,9 +1103,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" dependencies = [ "anstream", "anstyle", @@ -1122,9 +1122,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.4.4" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ "serde", ] @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", @@ -1854,9 +1854,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" dependencies = [ "serde", "value-bag", @@ -2865,9 +2865,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.3" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log", "ring 0.17.6", @@ -2923,9 +2923,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -3701,9 +3701,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.37.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", 
"bytes", @@ -3743,7 +3743,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.3", + "rustls 0.22.2", "rustls-pki-types", "tokio", ] @@ -4036,9 +4036,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" +checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -4046,9 +4046,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc35703541cbccb5278ef7b589d79439fc808ff0b5867195a3230f9a47421d39" +checksum = "b0b9f3feef403a50d4d67e9741a6d8fc688bcbb4e4f31bd4aab72cc690284394" dependencies = [ "erased-serde", "serde", @@ -4057,9 +4057,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "285b43c29d0b4c0e65aad24561baee67a1b69dc9be9375d4a85138cbf556f7f8" +checksum = "30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d" dependencies = [ "sval", "sval_buffer", @@ -4686,7 +4686,7 @@ dependencies = [ "flume", "futures", "log", - "rustls 0.22.3", + "rustls 0.22.2", "rustls-webpki 0.102.2", "serde", "tokio", @@ -4773,7 +4773,7 @@ dependencies = [ "base64 0.21.4", "futures", "log", - "rustls 0.22.3", + "rustls 0.22.2", "rustls-pemfile 2.0.0", "rustls-pki-types", "rustls-webpki 0.102.2", diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 8735ae8daa..486346a8ea 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -30,6 +30,10 @@ async fn main() { println!("Sending Query 
'{selector}'..."); let replies = session .get(&selector) + // // By default get receives replies from a FIFO. + // // Uncomment this line to use a ring channel instead. + // // More information on the ring channel are available in the z_pull example. + .with(zenoh::handlers::RingChannel::default()) .value(value) .target(target) .timeout(timeout) diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 4e44930f4f..d6ae465555 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; +use zenoh::{config::Config, handlers::RingChannel, prelude::r#async::*}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -29,32 +29,27 @@ async fn main() { println!("Declaring Subscriber on '{key_expr}'..."); let subscriber = session .declare_subscriber(&key_expr) - .with(RingBuffer::new(size)) + .with(RingChannel::new(size)) .res() .await .unwrap(); - println!( - "Pulling data every {:#?} seconds. Press CTRL-C to quit...", - interval - ); + println!("Press CTRL-C to quit..."); + + // Blocking recv. If the ring is empty, wait for the first sample to arrive. loop { - match subscriber.recv() { - Ok(Some(sample)) => { + // Use .recv() for the synchronous version. + match subscriber.recv_async().await { + Ok(sample) => { let payload = sample .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( - ">> [Subscriber] Pulled {} ('{}': '{}')", + ">> [Subscriber] Pulled {} ('{}': '{}')... performing a computation of {:#?}", sample.kind(), sample.key_expr().as_str(), payload, - ); - } - Ok(None) => { - println!( - ">> [Subscriber] Pulled nothing... sleep for {:#?}", interval ); tokio::time::sleep(interval).await; @@ -65,6 +60,35 @@ async fn main() { } } } + + // Non-blocking recv. This can be usually used to implement a polling mechanism. 
+ // loop { + // match subscriber.try_recv() { + // Ok(Some(sample)) => { + // let payload = sample + // .payload() + // .deserialize::() + // .unwrap_or_else(|e| format!("{}", e)); + // println!( + // ">> [Subscriber] Pulled {} ('{}': '{}')", + // sample.kind(), + // sample.key_expr().as_str(), + // payload, + // ); + // } + // Ok(None) => { + // println!( + // ">> [Subscriber] Pulled nothing... sleep for {:#?}", + // interval + // ); + // tokio::time::sleep(interval).await; + // } + // Err(e) => { + // println!(">> [Subscriber] Pull error: {e}"); + // return; + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Debug)] @@ -73,10 +97,10 @@ struct SubArgs { /// The Key Expression to subscribe to. key: KeyExpr<'static>, /// The size of the ringbuffer. - #[arg(long, default_value = "3")] + #[arg(short, long, default_value = "3")] size: usize, /// The interval for pulling the ringbuffer. - #[arg(long, default_value = "5.0")] + #[arg(short, long, default_value = "5.0")] interval: f32, #[command(flatten)] common: CommonArgs, diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 83ac63ce1f..5113f1c2b7 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -29,6 +29,10 @@ async fn main() { println!("Declaring Queryable on '{key_expr}'..."); let queryable = session .declare_queryable(&key_expr) + // // By default queryable receives queries from a FIFO. + // // Uncomment this line to use a ring channel instead. + // // More information on the ring channel are available in the z_pull example. 
+ // .with(zenoh::handlers::RingChannel::default()) .complete(complete) .res() .await diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs deleted file mode 100644 index c5d2c6bb90..0000000000 --- a/zenoh/src/handlers.rs +++ /dev/null @@ -1,186 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Callback handler trait. -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; - -use std::sync::{Arc, Mutex, Weak}; -use zenoh_collections::RingBuffer as RingBufferInner; -use zenoh_result::ZResult; - -/// An alias for `Arc`. -pub type Dyn = std::sync::Arc; - -/// An immutable callback function. -pub type Callback<'a, T> = Dyn; - -/// A type that can be converted into a [`Callback`]-handler pair. -/// -/// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, -/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. -/// -/// Any closure that accepts `T` can be converted into a pair of itself and `()`. 
-pub trait IntoHandler<'a, T> { - type Handler; - - fn into_handler(self) -> (Callback<'a, T>, Self::Handler); -} - -impl<'a, T, F> IntoHandler<'a, T> for F -where - F: Fn(T) + Send + Sync + 'a, -{ - type Handler = (); - fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { - (Dyn::from(self), ()) - } -} - -impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { - type Handler = flume::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let (sender, receiver) = self; - ( - Dyn::new(move |t| { - if let Err(e) = sender.send(t) { - log::error!("{}", e) - } - }), - receiver, - ) - } -} - -/// The default handler in Zenoh is a FIFO queue. -pub struct DefaultHandler; - -impl IntoHandler<'static, T> for DefaultHandler { - type Handler = flume::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_handler() - } -} - -impl IntoHandler<'static, T> - for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) -{ - type Handler = std::sync::mpsc::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let (sender, receiver) = self; - ( - Dyn::new(move |t| { - if let Err(e) = sender.send(t) { - log::error!("{}", e) - } - }), - receiver, - ) - } -} - -/// Ring buffer with a limited queue size, which allows users to keep the last N data. -pub struct RingBuffer { - ring: Arc>>, -} - -impl RingBuffer { - /// Initialize the RingBuffer with the capacity size. 
- pub fn new(capacity: usize) -> Self { - RingBuffer { - ring: Arc::new(Mutex::new(RingBufferInner::new(capacity))), - } - } -} - -pub struct RingBufferHandler { - ring: Weak>>, -} - -impl RingBufferHandler { - pub fn recv(&self) -> ZResult> { - let Some(ring) = self.ring.upgrade() else { - bail!("The ringbuffer has been deleted."); - }; - let mut guard = ring.lock().map_err(|e| zerror!("{}", e))?; - Ok(guard.pull()) - } -} - -impl IntoHandler<'static, T> for RingBuffer { - type Handler = RingBufferHandler; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let receiver = RingBufferHandler { - ring: Arc::downgrade(&self.ring), - }; - ( - Dyn::new(move |t| match self.ring.lock() { - Ok(mut g) => { - // Eventually drop the oldest element. - g.push_force(t); - } - Err(e) => log::error!("{}", e), - }), - receiver, - ) - } -} - -/// A function that can transform a [`FnMut`]`(T)` to -/// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). -pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { - let lock = std::sync::Mutex::new(fnmut); - move |x| zlock!(lock)(x) -} - -/// A handler containing 2 callback functions: -/// - `callback`: the typical callback function. `context` will be passed as its last argument. -/// - `drop`: a callback called when this handler is dropped. -/// -/// It is guaranteed that: -/// -/// - `callback` will never be called once `drop` has started. -/// - `drop` will only be called **once**, and **after every** `callback` has ended. -/// - The two previous guarantees imply that `call` and `drop` are never called concurrently. 
-pub struct CallbackDrop -where - DropFn: FnMut() + Send + Sync + 'static, -{ - pub callback: Callback, - pub drop: DropFn, -} - -impl Drop for CallbackDrop -where - DropFn: FnMut() + Send + Sync + 'static, -{ - fn drop(&mut self) { - (self.drop)() - } -} - -impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop -where - OnEvent: Fn(Event) + Send + Sync + 'a, - DropFn: FnMut() + Send + Sync + 'static, -{ - type Handler = (); - - fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { - (Dyn::from(move |evt| (self.callback)(evt)), ()) - } -} diff --git a/zenoh/src/handlers/callback.rs b/zenoh/src/handlers/callback.rs new file mode 100644 index 0000000000..21c1b0878c --- /dev/null +++ b/zenoh/src/handlers/callback.rs @@ -0,0 +1,90 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use super::{Dyn, IntoHandler}; + +/// A function that can transform a [`FnMut`]`(T)` to +/// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). +pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { + let lock = std::sync::Mutex::new(fnmut); + move |x| zlock!(lock)(x) +} + +/// An immutable callback function. 
+pub type Callback<'a, T> = Dyn; + +impl<'a, T, F> IntoHandler<'a, T> for F +where + F: Fn(T) + Send + Sync + 'a, +{ + type Handler = (); + fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { + (Dyn::from(self), ()) + } +} + +impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let (sender, receiver) = self; + ( + Dyn::new(move |t| { + if let Err(e) = sender.send(t) { + log::error!("{}", e) + } + }), + receiver, + ) + } +} + +/// A handler containing 2 callback functions: +/// - `callback`: the typical callback function. `context` will be passed as its last argument. +/// - `drop`: a callback called when this handler is dropped. +/// +/// It is guaranteed that: +/// +/// - `callback` will never be called once `drop` has started. +/// - `drop` will only be called **once**, and **after every** `callback` has ended. +/// - The two previous guarantees imply that `call` and `drop` are never called concurrently. 
+pub struct CallbackDrop +where + DropFn: FnMut() + Send + Sync + 'static, +{ + pub callback: Callback, + pub drop: DropFn, +} + +impl Drop for CallbackDrop +where + DropFn: FnMut() + Send + Sync + 'static, +{ + fn drop(&mut self) { + (self.drop)() + } +} + +impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop +where + OnEvent: Fn(Event) + Send + Sync + 'a, + DropFn: FnMut() + Send + Sync + 'static, +{ + type Handler = (); + + fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { + (Dyn::from(move |evt| (self.callback)(evt)), ()) + } +} diff --git a/zenoh/src/handlers/fifo.rs b/zenoh/src/handlers/fifo.rs new file mode 100644 index 0000000000..0fa3ab304c --- /dev/null +++ b/zenoh/src/handlers/fifo.rs @@ -0,0 +1,61 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use super::{callback::Callback, Dyn, IntoHandler, API_DATA_RECEPTION_CHANNEL_SIZE}; + +/// The default handler in Zenoh is a FIFO queue. + +pub struct FifoChannel { + capacity: usize, +} + +impl FifoChannel { + /// Initialize the RingBuffer with the capacity size. 
+ pub fn new(capacity: usize) -> Self { + Self { capacity } + } +} + +impl Default for FifoChannel { + fn default() -> Self { + Self::new(*API_DATA_RECEPTION_CHANNEL_SIZE) + } +} + +impl IntoHandler<'static, T> for FifoChannel { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + flume::bounded(self.capacity).into_handler() + } +} + +impl IntoHandler<'static, T> + for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) +{ + type Handler = std::sync::mpsc::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let (sender, receiver) = self; + ( + Dyn::new(move |t| { + if let Err(e) = sender.send(t) { + log::error!("{}", e) + } + }), + receiver, + ) + } +} diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/handlers/mod.rs new file mode 100644 index 0000000000..627c166795 --- /dev/null +++ b/zenoh/src/handlers/mod.rs @@ -0,0 +1,52 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +mod callback; +mod fifo; +mod ring; + +pub use callback::*; +pub use fifo::*; +pub use ring::*; + +use crate::API_DATA_RECEPTION_CHANNEL_SIZE; + +/// An alias for `Arc`. +pub type Dyn = std::sync::Arc; + +/// A type that can be converted into a [`Callback`]-handler pair. +/// +/// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, +/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. 
+/// +/// Any closure that accepts `T` can be converted into a pair of itself and `()`. +pub trait IntoHandler<'a, T> { + type Handler; + + fn into_handler(self) -> (Callback<'a, T>, Self::Handler); +} + +/// The default handler in Zenoh is a FIFO queue. +#[repr(transparent)] +#[derive(Default)] +pub struct DefaultHandler(FifoChannel); + +impl IntoHandler<'static, T> for DefaultHandler { + type Handler = >::Handler; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + self.0.into_handler() + } +} diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/handlers/ring.rs new file mode 100644 index 0000000000..341a3efadd --- /dev/null +++ b/zenoh/src/handlers/ring.rs @@ -0,0 +1,116 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use crate::API_DATA_RECEPTION_CHANNEL_SIZE; + +use super::{callback::Callback, Dyn, IntoHandler}; +use std::sync::{Arc, Weak}; +use zenoh_collections::RingBuffer; +use zenoh_result::ZResult; + +/// A synchrounous ring channel with a limited size that allows users to keep the last N data. +pub struct RingChannel { + capacity: usize, +} + +impl RingChannel { + /// Initialize the RingBuffer with the capacity size. + pub fn new(capacity: usize) -> Self { + Self { capacity } + } +} + +impl Default for RingChannel { + fn default() -> Self { + Self::new(*API_DATA_RECEPTION_CHANNEL_SIZE) + } +} + +struct RingChannelInner { + ring: std::sync::Mutex>, + not_empty: flume::Receiver<()>, +} + +pub struct RingChannelHandler { + ring: Weak>, +} + +impl RingChannelHandler { + /// Receive from the ring channel. 
If the ring channel is empty, this call will block until an element is available in the channel. + pub fn recv(&self) -> ZResult { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + loop { + if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() { + return Ok(t); + } + channel.not_empty.recv().map_err(|e| zerror!("{}", e))?; + } + } + + /// Receive from the ring channel. If the ring channel is empty, this call will block until an element is available in the channel. + pub async fn recv_async(&self) -> ZResult { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + loop { + if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() { + return Ok(t); + } + channel + .not_empty + .recv_async() + .await + .map_err(|e| zerror!("{}", e))?; + } + } + + /// Try to receive from the ring channel. If the ring channel is empty, this call will return immediately without blocking. + pub fn try_recv(&self) -> ZResult> { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + let mut guard = channel.ring.lock().map_err(|e| zerror!("{}", e))?; + Ok(guard.pull()) + } +} + +impl IntoHandler<'static, T> for RingChannel { + type Handler = RingChannelHandler; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let (sender, receiver) = flume::bounded(1); + let inner = Arc::new(RingChannelInner { + ring: std::sync::Mutex::new(RingBuffer::new(self.capacity)), + not_empty: receiver, + }); + let receiver = RingChannelHandler { + ring: Arc::downgrade(&inner), + }; + ( + Dyn::new(move |t| match inner.ring.lock() { + Ok(mut g) => { + // Eventually drop the oldest element. 
+ g.push_force(t); + drop(g); + let _ = sender.try_send(()); + } + Err(e) => log::error!("{}", e), + }), + receiver, + ) + } +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ea212485ec..90b4b2af58 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -214,7 +214,7 @@ where ScoutBuilder { what: what.into(), config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 23e1846741..a28292fda2 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -155,7 +155,7 @@ impl<'a> Liveliness<'a> { LivelinessSubscriberBuilder { session: self.session.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -198,7 +198,7 @@ impl<'a> Liveliness<'a> { session: &self.session, key_expr, timeout, - handler: DefaultHandler, + handler: DefaultHandler::default(), } } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f36e253636..e3d43993f3 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -518,7 +518,7 @@ impl<'a> Publisher<'a> { pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Borrow(self), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -623,7 +623,7 @@ impl PublisherDeclarations for std::sync::Arc> { fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Shared(self.clone()), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ca5d44c3a6..3f1c382a66 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -302,7 +302,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { key_expr: 
TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), reliability: Reliability::DEFAULT, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } fn declare_queryable<'b, TryIntoKeyExpr>( @@ -318,7 +318,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { key_expr: key_expr.try_into().map_err(Into::into), complete: false, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } fn declare_publisher<'b, TryIntoKeyExpr>( @@ -814,7 +814,7 @@ impl Session { value: None, #[cfg(feature = "unstable")] attachment: None, - handler: DefaultHandler, + handler: DefaultHandler::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), } @@ -1865,7 +1865,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: key_expr.try_into().map_err(Into::into), reliability: Reliability::DEFAULT, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -1910,7 +1910,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: key_expr.try_into().map_err(Into::into), complete: false, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index ceed15e2c3..57910bf3d6 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -14,12 +14,12 @@ #[test] fn pubsub_with_ringbuffer() { use std::{thread, time::Duration}; - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::{handlers::RingChannel, prelude::sync::*}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let sub = zenoh .declare_subscriber("test/ringbuffer") - .with(RingBuffer::new(3)) + .with(RingChannel::new(3)) .res() .unwrap(); for i in 0..10 { @@ -32,7 +32,6 @@ fn pubsub_with_ringbuffer() { for i in 7..10 { assert_eq!( sub.recv() - .unwrap() .unwrap() .payload() .deserialize::() @@ -46,12 +45,12 @@ fn pubsub_with_ringbuffer() { 
#[test] fn query_with_ringbuffer() { - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::{handlers::RingChannel, prelude::sync::*}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let queryable = zenoh .declare_queryable("test/ringbuffer_query") - .with(RingBuffer::new(1)) + .with(RingChannel::new(1)) .res() .unwrap(); @@ -66,7 +65,7 @@ fn query_with_ringbuffer() { .res() .unwrap(); - let query = queryable.recv().unwrap().unwrap(); + let query = queryable.recv().unwrap(); // Only receive the latest query assert_eq!( query From d6da7a852c48f464cd8d7ef2a2f962e382e36d63 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Thu, 11 Apr 2024 20:10:09 +0200 Subject: [PATCH 205/598] Accessors for Value (#927) * accessors for Value * doctest fix * valgrind test fix --- .../src/queryable_get/bin/z_queryable_get.rs | 4 +- examples/examples/z_get.rs | 2 +- examples/examples/z_get_liveliness.rs | 2 +- examples/examples/z_queryable.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 13 ++++--- .../src/replica/align_queryable.rs | 4 +- .../src/replica/aligner.rs | 7 +--- .../src/replica/storage.rs | 34 ++++++++--------- zenoh/src/liveliness.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/query.rs | 16 ++++++-- zenoh/src/queryable.rs | 14 +++---- zenoh/src/sample/mod.rs | 4 +- zenoh/src/value.rs | 38 +++++++------------ zenoh/tests/attachments.rs | 4 +- zenoh/tests/handler.rs | 2 +- zenoh/tests/session.rs | 2 +- 17 files changed, 72 insertions(+), 82 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index ea0f16399c..84c3a82f88 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -34,7 +34,7 @@ async fn main() { query .reply( query.selector().key_expr, - query.value().unwrap().payload.clone(), + 
query.value().unwrap().payload().clone(), ) .res() .await @@ -71,7 +71,7 @@ async fn main() { ), Err(err) => println!( ">> Received (ERROR: '{}')", - err.payload + err.payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)) ), diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 486346a8ea..77b67b90ed 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -55,7 +55,7 @@ async fn main() { } Err(err) => { let payload = err - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!(">> Received (ERROR: '{}')", payload); diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 487f3c25d6..0a15b287c7 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -40,7 +40,7 @@ async fn main() { Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), Err(err) => { let payload = err - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!(">> Received (ERROR: '{}')", payload); diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 5113f1c2b7..5ef73d905b 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -44,7 +44,7 @@ async fn main() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), Some(value) => { let payload = value - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..6edcfdb945 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -97,8 +97,8 @@ fn result_to_json(sample: Result) -> JSONSample { Ok(sample) => sample_to_json(&sample), Err(err) => JSONSample { key: "ERROR".into(), - value: payload_to_json(&err.payload, &err.encoding), - encoding: err.encoding.to_string(), + value: 
payload_to_json(err.payload(), err.encoding()), + encoding: err.encoding().to_string(), time: None, }, } @@ -139,7 +139,7 @@ fn result_to_html(sample: Result) -> String { Err(err) => { format!( "

ERROR
\n
{}
\n", - err.payload.deserialize::>().unwrap_or_default() + err.payload().deserialize::>().unwrap_or_default() ) } } @@ -172,8 +172,11 @@ async fn to_raw_response(results: flume::Receiver) -> Response { ), Err(value) => response( StatusCode::Ok, - Cow::from(&value.encoding).as_ref(), - &value.payload.deserialize::>().unwrap_or_default(), + Cow::from(value.encoding()).as_ref(), + &value + .payload() + .deserialize::>() + .unwrap_or_default(), ), }, Err(_) => response(StatusCode::Ok, "", ""), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 1ce6a1cb16..3a37095f67 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -126,8 +126,8 @@ impl AlignQueryable { } AlignData::Data(k, (v, ts)) => { query - .reply(k, v.payload) - .encoding(v.encoding) + .reply(k, v.payload().clone()) + .encoding(v.encoding().clone()) .timestamp(ts) .res() .await diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 64d5cfa1cd..f33b370200 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -106,11 +106,8 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let Value { - payload, encoding, .. 
- } = value; - let sample = SampleBuilder::put(key, payload) - .encoding(encoding) + let sample = SampleBuilder::put(key, value.payload().clone()) + .encoding(value.encoding().clone()) .timestamp(ts) .into(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 93075170ac..d2c2984c21 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -301,12 +301,12 @@ impl StorageService { { match update.kind { SampleKind::Put => { - SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) - .encoding(update.data.value.encoding) + SampleBuilder::put(k.clone(), update.data.value.payload().clone()) + .encoding(update.data.value.encoding().clone()) .timestamp(update.data.timestamp) .into() } - SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) + SampleKind::Delete => SampleBuilder::delete(k.clone()) .timestamp(update.data.timestamp) .into(), } @@ -329,8 +329,10 @@ impl StorageService { storage .put( stripped_key, - Value::new(sample_to_store.payload().clone()) - .encoding(sample_to_store.encoding().clone()), + Value::new( + sample_to_store.payload().clone(), + sample_to_store.encoding().clone(), + ), *sample_to_store.timestamp().unwrap(), ) .await @@ -514,8 +516,8 @@ impl StorageService { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q - .reply(key.clone(), entry.value.payload) - .encoding(entry.value.encoding) + .reply(key.clone(), entry.value.payload().clone()) + .encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) .res() .await @@ -546,8 +548,8 @@ impl StorageService { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q - .reply(q.key_expr().clone(), entry.value.payload) - .encoding(entry.value.encoding) + .reply(q.key_expr().clone(), entry.value.payload().clone()) + 
.encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) .res() .await @@ -665,20 +667,14 @@ impl StorageService { fn serialize_update(update: &Update) -> String { let Update { kind, - data: - StoredData { - value: Value { - payload, encoding, .. - }, - timestamp, - }, + data: StoredData { value, timestamp }, } = update; - let zbuf: ZBuf = payload.into(); + let zbuf: ZBuf = value.payload().into(); let result = ( kind.to_string(), timestamp.to_string(), - encoding.to_string(), + value.encoding().to_string(), zbuf.slices().collect::>(), ); serde_json::to_string_pretty(&result).unwrap() @@ -690,7 +686,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).encoding(result.2); + let value = Value::new(payload, result.2); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index a28292fda2..0b539ba636 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -608,7 +608,7 @@ where /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { /// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), -/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), +/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload()), /// } /// } /// # } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62c38b16ee..5b5b41b390 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -426,9 +426,7 @@ impl Primitives for AdminSpace { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), parameters, - value: query - .ext_body - .map(|b| Value::from(b.payload).encoding(b.encoding)), + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), qid: msg.id, zid, primitives, diff --git a/zenoh/src/query.rs 
b/zenoh/src/query.rs index 7b8da9f768..d089290326 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -172,13 +172,21 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { fn encoding>(self, encoding: T) -> Self { - let value = Some(self.value.unwrap_or_default().encoding(encoding)); - Self { value, ..self } + let mut value = self.value.unwrap_or_default(); + value.encoding = encoding.into(); + Self { + value: Some(value), + ..self + } } fn payload>(self, payload: T) -> Self { - let value = Some(self.value.unwrap_or_default().payload(payload)); - Self { value, ..self } + let mut value = self.value.unwrap_or_default(); + value.payload = payload.into(); + Self { + value: Some(value), + ..self + } } fn value>(self, value: T) -> Self { let value: Value = value.into(); diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a6d87df5a4..7d36fe8f99 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -489,17 +489,15 @@ pub struct ReplyErrBuilder<'a> { impl ValueBuilderTrait for ReplyErrBuilder<'_> { fn encoding>(self, encoding: T) -> Self { - Self { - value: self.value.encoding(encoding), - ..self - } + let mut value = self.value.clone(); + value.encoding = encoding.into(); + Self { value, ..self } } fn payload>(self, payload: T) -> Self { - Self { - value: self.value.payload(payload), - ..self - } + let mut value = self.value.clone(); + value.payload = payload.into(); + Self { value, ..self } } fn value>(self, value: T) -> Self { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index f4fb1e074a..7bb3fe9cde 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -16,7 +16,7 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::sample::builder::QoSBuilderTrait; use crate::time::Timestamp; use crate::Priority; 
#[cfg(feature = "unstable")] @@ -378,7 +378,7 @@ impl Sample { impl From for Value { fn from(sample: Sample) -> Self { - Value::new(sample.payload).encoding(sample.encoding) + Value::new(sample.payload, sample.encoding) } } diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 92a87cb6c5..d1b582111a 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,27 +13,26 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; +use crate::{encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - /// The binary [`Payload`] of this [`Value`]. - pub payload: Payload, - /// The [`Encoding`] of this [`Value`]. - pub encoding: Encoding, + pub(crate) payload: Payload, + pub(crate) encoding: Encoding, } impl Value { - /// Creates a new [`Value`] with default [`Encoding`]. - pub fn new(payload: T) -> Self + /// Creates a new [`Value`] with specified [`Payload`] and [`Encoding`]. + pub fn new(payload: T, encoding: E) -> Self where T: Into, + E: Into, { Value { payload: payload.into(), - encoding: Encoding::default(), + encoding: encoding.into(), } } /// Creates an empty [`Value`]. @@ -48,24 +47,15 @@ impl Value { pub fn is_empty(&self) -> bool { self.payload.is_empty() && self.encoding == Encoding::default() } -} -impl ValueBuilderTrait for Value { - fn encoding>(self, encoding: T) -> Self { - Self { - encoding: encoding.into(), - ..self - } + /// Gets binary [`Payload`] of this [`Value`]. + pub fn payload(&self) -> &Payload { + &self.payload } - fn payload>(self, payload: T) -> Self { - Self { - payload: payload.into(), - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { payload, encoding } + + /// Gets [`Encoding`] of this [`Value`]. 
+ pub fn encoding(&self) -> &Encoding { + &self.encoding } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 2a58749701..844e2985bc 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -66,7 +66,7 @@ fn attachment_queries() { .callback(|query| { let s = query .value() - .map(|q| q.payload.deserialize::().unwrap()) + .map(|q| q.payload().deserialize::().unwrap()) .unwrap_or_default(); println!("Query value: {}", s); @@ -82,7 +82,7 @@ fn attachment_queries() { query .reply( query.key_expr().clone(), - query.value().unwrap().payload.clone(), + query.value().unwrap().payload().clone(), ) .attachment(Attachment::from_iter( attachment diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 57910bf3d6..ad6648dc27 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -71,7 +71,7 @@ fn query_with_ringbuffer() { query .value() .unwrap() - .payload + .payload() .deserialize::() .unwrap(), "query2" diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 267bb5c284..603ebdac49 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -230,7 +230,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let e = s.sample.unwrap_err(); - assert_eq!(e.payload.len(), size); + assert_eq!(e.payload().len(), size); cnt += 1; } } From de84b6fb56982791523a968c79ce73a3cc3fe20d Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 14:23:04 +0200 Subject: [PATCH 206/598] Properties use Parameters internally --- Cargo.lock | 7 + commons/zenoh-collections/Cargo.toml | 2 + commons/zenoh-collections/src/lib.rs | 3 + commons/zenoh-collections/src/parameters.rs | 168 ++++++++++++++++ commons/zenoh-collections/src/properties.rs | 189 ++++++++++-------- commons/zenoh-protocol/Cargo.toml | 3 +- commons/zenoh-protocol/src/core/endpoint.rs | 147 
++------------ io/zenoh-links/zenoh-link-quic/Cargo.toml | 1 + io/zenoh-links/zenoh-link-quic/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-tls/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tls/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-unixpipe/Cargo.toml | 1 + .../zenoh-link-unixpipe/src/unix/mod.rs | 6 +- io/zenoh-transport/src/multicast/manager.rs | 7 +- io/zenoh-transport/src/unicast/manager.rs | 11 +- zenoh/src/selector.rs | 2 + 16 files changed, 318 insertions(+), 248 deletions(-) create mode 100644 commons/zenoh-collections/src/parameters.rs diff --git a/Cargo.lock b/Cargo.lock index 09e598d878..07e166c57c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4553,6 +4553,9 @@ dependencies = [ [[package]] name = "zenoh-collections" version = "0.11.0-dev" +dependencies = [ + "rand 0.8.5", +] [[package]] name = "zenoh-config" @@ -4717,6 +4720,7 @@ dependencies = [ "tokio", "tokio-rustls 0.24.1", "tokio-util", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -4782,6 +4786,7 @@ dependencies = [ "tokio-rustls 0.25.0", "tokio-util", "webpki-roots", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -4826,6 +4831,7 @@ dependencies = [ "tokio-util", "unix-named-pipe", "zenoh-buffers", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -5005,6 +5011,7 @@ dependencies = [ "serde", "uhlc", "zenoh-buffers", + "zenoh-collections", "zenoh-keyexpr", "zenoh-result", ] diff --git a/commons/zenoh-collections/Cargo.toml b/commons/zenoh-collections/Cargo.toml index ca01d7460e..27787e8c6a 100644 --- a/commons/zenoh-collections/Cargo.toml +++ b/commons/zenoh-collections/Cargo.toml @@ -31,5 +31,7 @@ description = "Internal crate for zenoh." 
[features] default = ["std"] std = [] +test = ["rand"] [dependencies] +rand = { workspace = true, optional = true } diff --git a/commons/zenoh-collections/src/lib.rs b/commons/zenoh-collections/src/lib.rs index ea9a9209e6..6690d372da 100644 --- a/commons/zenoh-collections/src/lib.rs +++ b/commons/zenoh-collections/src/lib.rs @@ -20,6 +20,9 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +pub mod parameters; +pub use parameters::*; + pub mod single_or_vec; pub use single_or_vec::*; diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs new file mode 100644 index 0000000000..6c34f6502d --- /dev/null +++ b/commons/zenoh-collections/src/parameters.rs @@ -0,0 +1,168 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +pub const LIST_SEPARATOR: char = ';'; +pub const FIELD_SEPARATOR: char = '='; +pub const VALUE_SEPARATOR: char = '|'; + +fn split_once(s: &str, c: char) -> (&str, &str) { + match s.find(c) { + Some(index) => { + let (l, r) = s.split_at(index); + (l, &r[1..]) + } + None => (s, ""), + } +} + +// tcp/localhost:7557?mymetadata=asdasd#myconfig=asdasd;asdasd=1;asdijabdiasd=1a + +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g` +pub struct Parameters; + +impl Parameters { + pub fn iter(s: &str) -> impl DoubleEndedIterator { + s.split(LIST_SEPARATOR).filter_map(|prop| { + if prop.is_empty() { + None + } else { + Some(split_once(prop, FIELD_SEPARATOR)) + } + }) + } + + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + Self::from_iter_into(iter, &mut into); + into + } + + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut from = iter.collect::>(); + from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); + Self::extend_into(from.iter().copied(), into); + } + + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + Self::iter(s).find(|x| x.0 == k).map(|x| x.1) + } + + pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + match Self::get(s, k) { + Some(v) => v.split(VALUE_SEPARATOR), + None => { + let mut i = "".split(VALUE_SEPARATOR); + i.next(); + i + } + } + } + + pub fn insert<'s, I>(mut iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + where + I: Iterator, + { + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + + let current = iter.filter(|x| x.0 != k); + let new = Some((k, v)).into_iter(); + let iter = current.chain(new); + (Parameters::concat(iter), item) + } + + pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, 
Option<&'s str>) + where + I: Iterator, + { + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let iter = iter.filter(|x| x.0 != k); + (Parameters::concat(iter), item) + } + + pub fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + Parameters::extend_into(iter, &mut into); + into + } + + pub fn extend_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut first = into.is_empty(); + for (k, v) in iter { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + } + + pub fn is_sorted<'s, I>(iter: I) -> bool + where + I: Iterator, + { + let mut prev = None; + for (k, _) in iter { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } + + #[cfg(feature = "test")] + pub fn rand(into: &mut String) { + use rand::{ + distributions::{Alphanumeric, DistString}, + Rng, + }; + + const MIN: usize = 2; + const MAX: usize = 8; + + let mut rng = rand::thread_rng(); + + let num = rng.gen_range(MIN..MAX); + for i in 0..num { + if i != 0 { + into.push(LIST_SEPARATOR); + } + let len = rng.gen_range(MIN..MAX); + let key = Alphanumeric.sample_string(&mut rng, len); + into.push_str(key.as_str()); + + into.push(FIELD_SEPARATOR); + + let len = rng.gen_range(MIN..MAX); + let value = Alphanumeric.sample_string(&mut rng, len); + into.push_str(value.as_str()); + } + } +} diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 281ac8ca68..030eca7d53 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -1,3 +1,6 @@ +use alloc::borrow::Cow; +use core::borrow::Borrow; + // // Copyright (c) 2022 ZettaScale Technology // @@ -11,128 +14,142 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::{ - collections::HashMap, - fmt, - ops::{Deref, DerefMut}, -}; - 
-const PROP_SEPS: &[&str] = &["\r\n", "\n", ";"]; -const DEFAULT_PROP_SEP: char = ';'; -const KV_SEP: char = '='; -const COMMENT_PREFIX: char = '#'; +use crate::Parameters; +use std::{collections::HashMap, fmt}; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. #[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] -pub struct Properties(HashMap); - -impl Deref for Properties { - type Target = HashMap; +pub struct Properties<'s>(Cow<'s, str>); - fn deref(&self) -> &Self::Target { +impl Properties<'_> { + pub fn as_str(&self) -> &str { &self.0 } -} -impl DerefMut for Properties { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 + pub fn get(&self, k: K) -> Option<&str> + where + K: Borrow, + { + Parameters::get(self.as_str(), k.borrow()) + } + + pub fn values(&self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + Parameters::values(self.as_str(), k.borrow()) + } + + pub fn iter(&self) -> impl DoubleEndedIterator { + Parameters::iter(self.as_str()) + } + + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let (inner, removed) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); + let removed = removed.map(|s| s.to_string()); + self.0 = Cow::Owned(inner); + removed + } + + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + let (inner, removed) = Parameters::remove(self.iter(), k.borrow()); + let removed = removed.map(|s| s.to_string()); + self.0 = Cow::Owned(inner); + removed } } -impl fmt::Display for Properties { - /// Format the Properties as a string, using `'='` for key/value separator - /// and `';'` for separator between each keys/values. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut it = self.0.iter(); - if let Some((k, v)) = it.next() { - if v.is_empty() { - write!(f, "{k}")? 
- } else { - write!(f, "{k}{KV_SEP}{v}")? - } - for (k, v) in it { - if v.is_empty() { - write!(f, "{DEFAULT_PROP_SEP}{k}")? - } else { - write!(f, "{DEFAULT_PROP_SEP}{k}{KV_SEP}{v}")? - } - } +impl<'s> From<&'s str> for Properties<'s> { + fn from(value: &'s str) -> Self { + if Parameters::is_sorted(Parameters::iter(value)) { + Self(Cow::Borrowed(value)) + } else { + Self(Cow::Owned(Parameters::from_iter(Parameters::iter(value)))) } - Ok(()) } } -impl fmt::Debug for Properties { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{self}") +impl From for Properties<'_> { + fn from(value: String) -> Self { + if Parameters::is_sorted(Parameters::iter(value.as_str())) { + Self(Cow::Owned(value)) + } else { + Self(Cow::Owned(Parameters::from_iter(Parameters::iter( + value.as_str(), + )))) + } } } -impl From<&str> for Properties { - fn from(s: &str) -> Self { - let mut props = vec![s]; - for sep in PROP_SEPS { - props = props - .into_iter() - .flat_map(|s| s.split(sep)) - .collect::>(); - } - props = props.into_iter().map(str::trim).collect::>(); - let inner = props - .iter() - .filter_map(|prop| { - if prop.is_empty() || prop.starts_with(COMMENT_PREFIX) { - None - } else { - let mut it = prop.splitn(2, KV_SEP); - Some(( - it.next().unwrap().trim().to_string(), - it.next().unwrap_or("").trim().to_string(), - )) - } - }) - .collect(); - Self(inner) +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> +where + K: AsRef + 's, + V: AsRef + 's, +{ + fn from_iter>(iter: T) -> Self { + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + Self(Cow::Owned(inner)) } } -impl From for Properties { - fn from(s: String) -> Self { - Self::from(s.as_str()) +impl<'s, K, V> FromIterator<&'s (K, V)> for Properties<'_> +where + K: AsRef + 's, + V: AsRef + 's, +{ + fn from_iter>(iter: T) -> Self { + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + Self(Cow::Owned(inner)) } } 
-impl From> for Properties { - fn from(map: HashMap) -> Self { - Self(map) +impl<'s, K, V> From<&'s [(K, V)]> for Properties<'_> +where + K: AsRef + 's, + V: AsRef + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) } } -impl From<&[(&str, &str)]> for Properties { - fn from(kvs: &[(&str, &str)]) -> Self { - let inner = kvs - .iter() - .map(|(k, v)| ((*k).to_string(), (*v).to_string())) - .collect(); - Self(inner) +impl From> for Properties<'_> +where + K: AsRef, + V: AsRef, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) } } -impl TryFrom<&std::path::Path> for Properties { - type Error = std::io::Error; +impl From> for HashMap { + fn from(props: Properties) -> Self { + HashMap::from_iter( + Parameters::iter(props.as_str()).map(|(k, v)| (k.to_string(), v.to_string())), + ) + } +} - fn try_from(p: &std::path::Path) -> Result { - Ok(Self::from(std::fs::read_to_string(p)?)) +impl fmt::Display for Properties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) } } -impl From for HashMap { - fn from(props: Properties) -> Self { - props.0 +impl fmt::Debug for Properties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) } } diff --git a/commons/zenoh-protocol/Cargo.toml b/commons/zenoh-protocol/Cargo.toml index 9d7e35d690..2c3a36b7a7 100644 --- a/commons/zenoh-protocol/Cargo.toml +++ b/commons/zenoh-protocol/Cargo.toml @@ -33,7 +33,7 @@ std = [ "zenoh-keyexpr/std", "zenoh-result/std", ] -test = ["rand", "zenoh-buffers/test"] +test = ["rand", "zenoh-buffers/test", "zenoh-collections/test"] shared-memory = ["std", "zenoh-buffers/shared-memory"] stats = [] @@ -43,6 +43,7 @@ rand = { workspace = true, features = ["alloc", "getrandom"], optional = true } serde = { workspace = true, features = ["alloc"] } uhlc = { workspace = true, default-features = false } zenoh-buffers = { workspace = true, default-features = false } +zenoh-collections = { workspace = 
true, default-features = false } zenoh-keyexpr = { workspace = true } zenoh-result = { workspace = true } diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index a8fcb3ae98..898ee615e6 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -12,27 +12,15 @@ // ZettaScale Zenoh Team, // use super::locator::*; -use alloc::{borrow::ToOwned, format, string::String, vec::Vec}; +use alloc::{borrow::ToOwned, format, string::String}; use core::{convert::TryFrom, fmt, str::FromStr}; +use zenoh_collections::Parameters; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; // Parsing chars pub const PROTO_SEPARATOR: char = '/'; pub const METADATA_SEPARATOR: char = '?'; -pub const LIST_SEPARATOR: char = ';'; -pub const FIELD_SEPARATOR: char = '='; pub const CONFIG_SEPARATOR: char = '#'; -pub const VALUE_SEPARATOR: char = '|'; - -fn split_once(s: &str, c: char) -> (&str, &str) { - match s.find(c) { - Some(index) => { - let (l, r) = s.split_at(index); - (l, &r[1..]) - } - None => (s, ""), - } -} // Parsing functions pub(super) fn protocol(s: &str) -> &str { @@ -64,77 +52,6 @@ pub(super) fn config(s: &str) -> &str { } } -pub struct Parameters; - -impl Parameters { - pub fn extend<'s, I>(iter: I, into: &mut String) - where - I: Iterator, - { - let mut first = into.is_empty(); - for (k, v) in iter { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - } - - pub fn iter(s: &str) -> impl DoubleEndedIterator { - s.split(LIST_SEPARATOR).filter_map(|prop| { - if prop.is_empty() { - None - } else { - Some(split_once(prop, FIELD_SEPARATOR)) - } - }) - } - - pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Self::iter(s).find(|x| x.0 == k).map(|x| x.1) - } - - pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { - match Self::get(s, k) { - 
Some(v) => v.split(VALUE_SEPARATOR), - None => { - let mut i = "".split(VALUE_SEPARATOR); - i.next(); - i - } - } - } - - pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> String - where - I: Iterator, - { - let current = iter.filter(|x| x.0 != k); - let new = Some((k, v)).into_iter(); - let iter = current.chain(new); - - let mut into = String::new(); - Parameters::extend(iter, &mut into); - into - } - - pub(super) fn remove<'s, I>(iter: I, k: &'s str) -> String - where - I: Iterator, - { - let iter = iter.filter(|x| x.0 != k); - - let mut into = String::new(); - Parameters::extend(iter, &mut into); - into - } -} - // Protocol #[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] @@ -341,7 +258,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::insert(self.0.metadata().iter(), k, v), + Parameters::insert(self.0.metadata().iter(), k, v).0, self.0.config(), )?; @@ -353,7 +270,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().iter(), k), + Parameters::remove(self.0.metadata().iter(), k).0, self.0.config(), )?; @@ -459,7 +376,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::insert(self.0.config().iter(), k, v), + Parameters::insert(self.0.config().iter(), k, v).0, )?; self.0.inner = ep.inner; @@ -471,7 +388,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().iter(), k), + Parameters::remove(self.0.config().iter(), k).0, )?; self.0.inner = ep.inner; @@ -621,27 +538,6 @@ impl TryFrom for EndPoint { const ERR: &str = "Endpoints must be of the form /
[?][#]"; - fn sort_hashmap(from: &str, into: &mut String) { - let mut from = from - .split(LIST_SEPARATOR) - .map(|p| split_once(p, FIELD_SEPARATOR)) - .collect::>(); - from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); - - let mut first = true; - for (k, v) in from.iter() { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - } - let pidx = s .find(PROTO_SEPARATOR) .and_then(|i| (!s[..i].is_empty() && !s[i + 1..].is_empty()).then_some(i)) @@ -654,14 +550,14 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - sort_hashmap(&s[midx + 1..], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - sort_hashmap(&s[cidx + 1..], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -674,10 +570,10 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - sort_hashmap(&s[midx + 1..cidx], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); inner.push(CONFIG_SEPARATOR); - sort_hashmap(&s[cidx + 1..], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } @@ -699,31 +595,12 @@ impl EndPoint { pub fn rand() -> Self { use rand::{ distributions::{Alphanumeric, DistString}, - rngs::ThreadRng, Rng, }; const MIN: usize = 2; const MAX: usize = 8; - fn gen_hashmap(rng: &mut 
ThreadRng, endpoint: &mut String) { - let num = rng.gen_range(MIN..MAX); - for i in 0..num { - if i != 0 { - endpoint.push(LIST_SEPARATOR); - } - let len = rng.gen_range(MIN..MAX); - let key = Alphanumeric.sample_string(rng, len); - endpoint.push_str(key.as_str()); - - endpoint.push(FIELD_SEPARATOR); - - let len = rng.gen_range(MIN..MAX); - let value = Alphanumeric.sample_string(rng, len); - endpoint.push_str(value.as_str()); - } - } - let mut rng = rand::thread_rng(); let mut endpoint = String::new(); @@ -739,11 +616,11 @@ impl EndPoint { if rng.gen_bool(0.5) { endpoint.push(METADATA_SEPARATOR); - gen_hashmap(&mut rng, &mut endpoint); + Parameters::rand(&mut endpoint); } if rng.gen_bool(0.5) { endpoint.push(CONFIG_SEPARATOR); - gen_hashmap(&mut rng, &mut endpoint); + Parameters::rand(&mut endpoint); } endpoint.parse().unwrap() diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 496830b5ef..421db99e25 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -36,6 +36,7 @@ rustls-webpki = { workspace = true } secrecy = {workspace = true } tokio = { workspace = true, features = ["io-util", "net", "fs", "sync", "time"] } tokio-util = { workspace = true, features = ["rt"] } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 4bcabaf5b6..7f5e2a1587 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -25,14 +25,12 @@ use config::{ }; use secrecy::ExposeSecret; use std::net::SocketAddr; +use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{ - endpoint::{Address, Parameters}, - 
Locator, - }, + core::{endpoint::Address, Locator}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -131,8 +129,7 @@ impl ConfigurationInspector for QuicConfigurator { }; } - let mut s = String::new(); - Parameters::extend(ps.drain(..), &mut s); + let s = Parameters::from_iter(ps.drain(..)); Ok(s) } diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 975fa49467..d164476e22 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -38,6 +38,7 @@ tokio = { workspace = true, features = ["io-util", "net", "fs", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } webpki-roots = { workspace = true } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 7faebb4cd9..dae8227cad 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -27,14 +27,12 @@ use config::{ use rustls_pki_types::ServerName; use secrecy::ExposeSecret; use std::{convert::TryFrom, net::SocketAddr}; +use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{ - endpoint::{self, Address}, - Locator, - }, + core::{endpoint::Address, Locator}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -166,8 +164,7 @@ impl ConfigurationInspector for TlsConfigurator { }; } - let mut s = String::new(); - endpoint::Parameters::extend(ps.drain(..), &mut s); + let s = Parameters::from_iter(ps.drain(..)); Ok(s) } diff --git a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml index 66784728f9..84e083caf8 100644 --- 
a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml @@ -32,6 +32,7 @@ async-trait = { workspace = true } log = { workspace = true } rand = { workspace = true, features = ["default"] } zenoh-buffers = { workspace = true } +zenoh-collections = { workspace = true } zenoh-core = { workspace = true } zenoh-config = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index bcafaaba3c..70d3d4dddc 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -21,10 +21,11 @@ pub mod unicast; use async_trait::async_trait; pub use unicast::*; +use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{Locator, Parameters}; +use zenoh_protocol::core::Locator; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; @@ -56,8 +57,7 @@ impl ConfigurationInspector for UnixPipeConfigurator { properties.push((config::FILE_ACCESS_MASK, &file_access_mask_)); } - let mut s = String::new(); - Parameters::extend(properties.drain(..), &mut s); + let s = Parameters::from_iter(properties.drain(..)); Ok(s) } diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index b9b594205f..daf49ce9a3 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; +use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] @@ -27,7 +28,7 @@ use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use 
zenoh_link::*; use zenoh_protocol::core::ZenohId; -use zenoh_protocol::{core::endpoint, transport::close}; +use zenoh_protocol::transport::close; use zenoh_result::{bail, zerror, ZResult}; pub struct TransportManagerConfigMulticast { @@ -259,9 +260,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint - .config_mut() - .extend(endpoint::Parameters::iter(config))?; + endpoint.config_mut().extend(Parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 8a63f4f630..eb0339c35b 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -36,6 +36,7 @@ use std::{ time::Duration, }; use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] @@ -45,7 +46,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::{endpoint, ZenohId}, + core::ZenohId, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; @@ -379,9 +380,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint - .config_mut() - .extend(endpoint::Parameters::iter(config))?; + endpoint.config_mut().extend(Parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -690,9 +689,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint - .config_mut() - .extend(endpoint::Parameters::iter(config))?; + endpoint.config_mut().extend(Parameters::iter(config))?; }; // 
Create a new link associated by calling the Link Manager diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..a9e8941b33 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -85,6 +85,7 @@ impl<'a> Selector<'a> { { self.decode_into_map() } + /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. pub fn parameters_cowmap(&'a self) -> ZResult, Cow<'a, str>>> { self.decode_into_map() @@ -185,6 +186,7 @@ impl<'a> Selector<'a> { selector.drain(splice_start..(splice_end + (splice_end != selector.len()) as usize)); } } + #[cfg(any(feature = "unstable", test))] pub(crate) fn parameter_index(&self, param_name: &str) -> ZResult> { let starts_with_param = |s: &str| { From 434c719f3a75a06a6252e3fa69737e10fbb8a936 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 18:49:46 +0200 Subject: [PATCH 207/598] Rework Selector to use Properties --- commons/zenoh-collections/src/parameters.rs | 78 +-- commons/zenoh-collections/src/properties.rs | 76 ++- commons/zenoh-protocol/src/core/endpoint.rs | 4 +- commons/zenoh-util/src/std_only/time_range.rs | 13 + plugins/zenoh-plugin-rest/src/lib.rs | 4 +- .../src/replica/align_queryable.rs | 10 +- .../src/replica/storage.rs | 4 +- zenoh/src/net/runtime/adminspace.rs | 17 +- zenoh/src/prelude.rs | 2 +- zenoh/src/query.rs | 7 +- zenoh/src/queryable.rs | 11 +- zenoh/src/selector.rs | 471 ++++++------------ zenoh/src/session.rs | 19 +- zenoh/tests/session.rs | 2 +- 14 files changed, 323 insertions(+), 395 deletions(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 6c34f6502d..536f2beb2a 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -25,13 +25,11 @@ fn split_once(s: &str, c: char) -> (&str, &str) { } } -// tcp/localhost:7557?mymetadata=asdasd#myconfig=asdasd;asdasd=1;asdijabdiasd=1a - -/// Parameters 
provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g` +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. pub struct Parameters; impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator { + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR).filter_map(|prop| { if prop.is_empty() { None @@ -55,13 +53,17 @@ impl Parameters { where I: Iterator, { - let mut from = iter.collect::>(); + let mut from = iter + .filter(|(k, _)| !k.is_empty()) + .collect::>(); from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Self::extend_into(from.iter().copied(), into); + Self::concat_into(from.iter().copied(), into); } pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Self::iter(s).find(|x| x.0 == k).map(|x| x.1) + Self::iter(s) + .find(|(key, _)| *key == k) + .map(|(_, value)| value) } pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { @@ -75,16 +77,17 @@ impl Parameters { } } - pub fn insert<'s, I>(mut iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where - I: Iterator, + I: Iterator + Clone, { - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let mut ic = iter.clone(); + let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); let current = iter.filter(|x| x.0 != k); let new = Some((k, v)).into_iter(); let iter = current.chain(new); - (Parameters::concat(iter), item) + (Parameters::from_iter(iter), item) } pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) @@ -96,31 +99,23 @@ impl Parameters { (Parameters::concat(iter), item) } - pub fn concat<'s, I>(iter: I) -> String + pub fn extend<'s, C, N>(current: C, new: N) -> String where - I: Iterator, + C: Iterator, + N: Iterator, { let mut into = String::new(); - Parameters::extend_into(iter, &mut into); + 
Parameters::extend_into(current, new, &mut into); into } - pub fn extend_into<'s, I>(iter: I, into: &mut String) + pub fn extend_into<'s, C, N>(current: C, new: N, into: &mut String) where - I: Iterator, + C: Iterator, + N: Iterator, { - let mut first = into.is_empty(); - for (k, v) in iter { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } + let iter = current.chain(new); + Parameters::from_iter_into(iter, into); } pub fn is_sorted<'s, I>(iter: I) -> bool @@ -137,6 +132,33 @@ impl Parameters { true } + fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + Parameters::concat_into(iter, &mut into); + into + } + + fn concat_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut first = into.is_empty(); + for (k, v) in iter { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + } + #[cfg(feature = "test")] pub fn rand(into: &mut String) { use rand::{ diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 030eca7d53..026dd69f72 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -1,6 +1,3 @@ -use alloc::borrow::Cow; -use core::borrow::Borrow; - // // Copyright (c) 2022 ZettaScale Technology // @@ -14,8 +11,11 @@ use core::borrow::Borrow; // Contributors: // ZettaScale Zenoh Team, // -use crate::Parameters; -use std::{collections::HashMap, fmt}; +use crate::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +use alloc::borrow::Cow; +use core::{borrow::Borrow, fmt}; +#[cfg(feature = "std")] +use std::collections::HashMap; /// A map of key/value (String,String) properties. 
/// It can be parsed from a String, using `;` or `` as separator between each properties @@ -25,10 +25,21 @@ use std::{collections::HashMap, fmt}; pub struct Properties<'s>(Cow<'s, str>); impl Properties<'_> { + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + pub fn as_str(&self) -> &str { &self.0 } + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + self.get(k).is_some() + } + pub fn get(&self, k: K) -> Option<&str> where K: Borrow, @@ -43,7 +54,7 @@ impl Properties<'_> { Parameters::values(self.as_str(), k.borrow()) } - pub fn iter(&self) -> impl DoubleEndedIterator { + pub fn iter(&self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.as_str()) } @@ -67,11 +78,31 @@ impl Properties<'_> { self.0 = Cow::Owned(inner); removed } + + pub fn extend<'s, I, K, V>(&mut self, iter: I) + where + I: IntoIterator, + // I::Item: std::borrow::Borrow<(K, V)>, + K: AsRef + 's, + V: AsRef + 's, + { + self.0 = Cow::Owned(Parameters::extend( + Parameters::iter(self.as_str()), + iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref())), + )); + } + + pub fn into_owned(self) -> Properties<'static> { + Properties(Cow::Owned(self.0.into_owned())) + } } impl<'s> From<&'s str> for Properties<'s> { - fn from(value: &'s str) -> Self { + fn from(mut value: &'s str) -> Self { if Parameters::is_sorted(Parameters::iter(value)) { + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); Self(Cow::Borrowed(value)) } else { Self(Cow::Owned(Parameters::from_iter(Parameters::iter(value)))) @@ -80,8 +111,12 @@ impl<'s> From<&'s str> for Properties<'s> { } impl From for Properties<'_> { - fn from(value: String) -> Self { + fn from(mut value: String) -> Self { if Parameters::is_sorted(Parameters::iter(value.as_str())) { + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); Self(Cow::Owned(value)) } else { 
Self(Cow::Owned(Parameters::from_iter(Parameters::iter( @@ -123,6 +158,7 @@ where } } +#[cfg(feature = "std")] impl From> for Properties<'_> where K: AsRef, @@ -133,14 +169,29 @@ where } } -impl From> for HashMap { - fn from(props: Properties) -> Self { +#[cfg(feature = "std")] +impl<'s> From<&'s Properties<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s Properties<'s>) -> Self { + HashMap::from_iter(Parameters::iter(props.as_str())) + } +} + +#[cfg(feature = "std")] +impl From<&Properties<'_>> for HashMap { + fn from(props: &Properties<'_>) -> Self { HashMap::from_iter( Parameters::iter(props.as_str()).map(|(k, v)| (k.to_string(), v.to_string())), ) } } +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: Properties) -> Self { + HashMap::from(&props) + } +} + impl fmt::Display for Properties<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) @@ -173,6 +224,11 @@ mod tests { Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) ); + assert_eq!( + Properties::from("p1=v1;p2=v2;|="), + Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + assert_eq!( Properties::from("p1=v1;p2;p3=v3"), Properties::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 898ee615e6..03678fb675 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -194,7 +194,7 @@ impl<'a> Metadata<'a> { self.as_str().is_empty() } - pub fn iter(&'a self) -> impl DoubleEndedIterator { + pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.0) } @@ -311,7 +311,7 @@ impl<'a> Config<'a> { self.as_str().is_empty() } - pub fn iter(&'a self) -> impl DoubleEndedIterator { + pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.0) } diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs 
index 50e5542fcc..9cfaf32655 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -51,6 +51,7 @@ const W_TO_SECS: f64 = D_TO_SECS * 7.0; /// iteratively getting values for `[t0..t1[`, `[t1..t2[`, `[t2..t3[`... #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TimeRange(pub TimeBound, pub TimeBound); + impl TimeRange { /// Resolves the offset bounds in the range using `now` as reference. pub fn resolve_at(self, now: SystemTime) -> TimeRange { @@ -81,6 +82,7 @@ impl TimeRange { } } } + impl TimeRange { /// Returns `true` if the provided `instant` belongs to `self`. pub fn contains(&self, instant: SystemTime) -> bool { @@ -96,17 +98,20 @@ impl TimeRange { } } } + impl From> for TimeRange { fn from(value: TimeRange) -> Self { TimeRange(value.0.into(), value.1.into()) } } + impl TryFrom> for TimeRange { type Error = (); fn try_from(value: TimeRange) -> Result { Ok(TimeRange(value.0.try_into()?, value.1.try_into()?)) } } + impl Display for TimeRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { @@ -121,6 +126,7 @@ impl Display for TimeRange { } } } + impl Display for TimeRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { @@ -195,6 +201,7 @@ pub enum TimeBound { Exclusive(T), Unbounded, } + impl From> for TimeBound { fn from(value: TimeBound) -> Self { match value { @@ -204,6 +211,7 @@ impl From> for TimeBound { } } } + impl TryFrom> for TimeBound { type Error = (); fn try_from(value: TimeBound) -> Result { @@ -214,6 +222,7 @@ impl TryFrom> for TimeBound { }) } } + impl TimeBound { /// Resolves `self` into a [`TimeBound`], using `now` as a reference for offset expressions. 
/// If `self` is time boundary that cannot be represented as `SystemTime` (which means it’s not inside @@ -238,11 +247,13 @@ pub enum TimeExpr { Fixed(SystemTime), Now { offset_secs: f64 }, } + impl From for TimeExpr { fn from(t: SystemTime) -> Self { Self::Fixed(t) } } + impl TryFrom for SystemTime { type Error = (); fn try_from(value: TimeExpr) -> Result { @@ -252,6 +263,7 @@ impl TryFrom for SystemTime { } } } + impl TimeExpr { /// Resolves `self` into a [`SystemTime`], using `now` as a reference for offset expressions. /// @@ -295,6 +307,7 @@ impl TimeExpr { } } } + impl Display for TimeExpr { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..fc74ca5421 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -407,12 +407,12 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result Option { - let properties = selector.parameters_stringmap().unwrap(); // note: this is a hashmap + let properties = selector.parameters(); // note: this is a hashmap log::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); - if properties.get(super::ERA).is_some() { + if properties.contains_key(super::ERA) { Some(AlignComponent::Era( EraType::from_str(properties.get(super::ERA).unwrap()).unwrap(), )) - } else if properties.get(super::INTERVALS).is_some() { + } else if properties.contains_key(super::INTERVALS) { let mut intervals = properties.get(super::INTERVALS).unwrap().to_string(); intervals.remove(0); intervals.pop(); @@ -206,7 +206,7 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.get(super::SUBINTERVALS).is_some() { + } else if properties.contains_key(super::SUBINTERVALS) { let mut subintervals = properties.get(super::SUBINTERVALS).unwrap().to_string(); subintervals.remove(0); subintervals.pop(); @@ -216,7 +216,7 @@ impl AlignQueryable 
{ .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.get(super::CONTENTS).is_some() { + } else if properties.contains_key(super::CONTENTS) { let contents = serde_json::from_str(properties.get(super::CONTENTS).unwrap()).unwrap(); Some(AlignComponent::Contents(contents)) } else { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 93075170ac..b957655579 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -510,7 +510,7 @@ impl StorageService { return; } }; - match storage.get(stripped_key, q.parameters()).await { + match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q @@ -542,7 +542,7 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - match storage.get(stripped_key, q.parameters()).await { + match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62c38b16ee..2f066c63bc 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -421,11 +421,10 @@ impl Primitives for AdminSpace { }; let zid = self.zid; - let parameters = query.parameters.to_owned(); let query = Query { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), - parameters, + parameters: query.parameters.into(), value: query .ext_body .map(|b| Value::from(b.payload).encoding(b.encoding)), @@ -530,8 +529,11 @@ fn router_data(context: &AdminContext, query: Query) { }); #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) - .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); + let stats = query + .selector() + .parameters() + .iter() + .any(|(k, v)| k == "_stats" && 
v != "false"); if stats { json.as_object_mut().unwrap().insert( "stats".to_string(), @@ -561,8 +563,11 @@ fn router_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) - .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); + let stats = query + .selector() + .parameters() + .iter() + .any(|(k, v)| k == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( "stats".to_string(), diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e2327c0dcc..9158425034 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -38,7 +38,7 @@ pub(crate) mod common { pub use crate::config::{self, Config, ValidatedMap}; pub use crate::handlers::IntoHandler; - pub use crate::selector::{Parameter, Parameters, Selector}; + pub use crate::selector::Selector; pub use crate::session::{Session, SessionDeclarations}; pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 7b8da9f768..20c76fa15b 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -378,9 +378,10 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { #[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { - selector: self - .selector - .and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), + selector: self.selector.map(|mut s| { + s.set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); + s + }), ..self } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a6d87df5a4..36a2c51ba0 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -20,6 +20,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::QoSBuilder; +use crate::selector::Parameters; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -46,7 +47,7 @@ pub(crate) struct QueryInner { /// The key 
expression of this Query. pub(crate) key_expr: KeyExpr<'static>, /// This Query's selector parameters. - pub(crate) parameters: String, + pub(crate) parameters: Parameters<'static>, /// This Query's body. pub(crate) value: Option, @@ -80,7 +81,7 @@ impl Query { pub fn selector(&self) -> Selector<'_> { Selector { key_expr: self.inner.key_expr.clone(), - parameters: (&self.inner.parameters).into(), + parameters: self.inner.parameters.clone(), } } @@ -92,7 +93,7 @@ impl Query { /// This Query's selector parameters. #[inline(always)] - pub fn parameters(&self) -> &str { + pub fn parameters(&self) -> &Parameters { &self.inner.parameters } @@ -220,9 +221,7 @@ impl Query { }) } fn _accepts_any_replies(&self) -> ZResult { - self.parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]) - .map(|a| a[0]) + Ok(self.parameters().accept_any_keyexpr()?.unwrap_or(false)) } } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index a9e8941b33..7d5326638e 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -14,19 +14,17 @@ //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - use crate::{prelude::KeyExpr, queryable::Query}; - use std::{ - borrow::{Borrow, Cow}, collections::HashMap, convert::TryFrom, - hash::Hash, + ops::{Deref, DerefMut}, str::FromStr, }; +use zenoh_collections::Properties; +use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_result::ZResult; +use zenoh_util::time_range::TimeRange; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters @@ -67,73 +65,42 @@ pub struct Selector<'a> { /// The part of this selector identifying which keys should be part of the selection. 
pub key_expr: KeyExpr<'a>, /// the part of this selector identifying which values should be part of the selection. - pub(crate) parameters: Cow<'a, str>, + pub(crate) parameters: Parameters<'a>, } pub const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { - /// Gets the parameters as a raw string. - pub fn parameters(&self) -> &str { + /// Gets the parameters. + pub fn parameters(&self) -> &Parameters { &self.parameters } - /// Extracts the selector parameters into a hashmap, returning an error in case of duplicated parameter names. - pub fn parameters_map(&'a self) -> ZResult> - where - K: AsRef + std::hash::Hash + std::cmp::Eq, - ExtractedName<'a, Self>: Into, - ExtractedValue<'a, Self>: Into, - { - self.decode_into_map() - } - /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. - pub fn parameters_cowmap(&'a self) -> ZResult, Cow<'a, str>>> { - self.decode_into_map() - } - /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. - pub fn parameters_stringmap(&'a self) -> ZResult> { - self.decode_into_map() - } /// Gets a mutable reference to the parameters as a String. /// /// Note that calling this function may cause an allocation and copy if the selector's parameters wasn't /// already owned by `self`. `self` owns its parameters as soon as this function returns. 
- pub fn parameters_mut(&mut self) -> &mut String { - if let Cow::Borrowed(s) = self.parameters { - self.parameters = Cow::Owned(s.to_owned()) - } - if let Cow::Owned(s) = &mut self.parameters { - s - } else { - unsafe { std::hint::unreachable_unchecked() } // this is safe because we just replaced the borrowed variant - } - } - pub fn set_parameters(&mut self, selector: impl Into>) { - self.parameters = selector.into(); - } - pub fn borrowing_clone(&'a self) -> Self { - Selector { - key_expr: self.key_expr.clone(), - parameters: self.parameters.as_ref().into(), - } + pub fn parameters_mut(&mut self) -> &mut Parameters<'a> { + &mut self.parameters } + + /// Create an owned version of this selector with `'static` lifetime. pub fn into_owned(self) -> Selector<'static> { Selector { key_expr: self.key_expr.into_owned(), - parameters: self.parameters.into_owned().into(), + parameters: Parameters(self.parameters.0.into_owned()), } } - #[deprecated = "If you have ownership of this selector, prefer `Selector::into_owned`"] - pub fn to_owned(&self) -> Selector<'static> { - self.borrowing_clone().into_owned() - } - /// Returns this selectors components as a tuple. - pub fn split(self) -> (KeyExpr<'a>, Cow<'a, str>) { + pub fn split(self) -> (KeyExpr<'a>, Parameters<'a>) { (self.key_expr, self.parameters) } + /// Sets the time range targeted by the selector. + pub fn set_time_range>>(&mut self, time_range: T) { + self.parameters_mut().set_time_range(time_range); + } + /// Sets the `parameters` part of this `Selector`. 
#[inline(always)] pub fn with_parameters(mut self, parameters: &'a str) -> Self { @@ -141,300 +108,172 @@ impl<'a> Selector<'a> { self } - pub fn extend<'b, I, K, V>(&'b mut self, parameters: I) - where - I: IntoIterator, - I::Item: std::borrow::Borrow<(K, V)>, - K: AsRef + 'b, - V: AsRef + 'b, - { - let it = parameters.into_iter(); - let selector = self.parameters_mut(); - let mut encoder = form_urlencoded::Serializer::new(selector); - encoder.extend_pairs(it).finish(); - } - - /// Sets the time range targeted by the selector. - pub fn with_time_range(&mut self, time_range: TimeRange) { - self.remove_time_range(); - let selector = self.parameters_mut(); - if !selector.is_empty() { - selector.push('&') - } - use std::fmt::Write; - write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallibe. - } - - pub fn remove_time_range(&mut self) { - let selector = self.parameters_mut(); - - let mut splice_start = 0; - let mut splice_end = 0; - for argument in selector.split('&') { - if argument.starts_with(TIME_RANGE_KEY) - && matches!( - argument.as_bytes().get(TIME_RANGE_KEY.len()), - None | Some(b'=') - ) - { - splice_end = splice_start + argument.len(); - break; - } - splice_start += argument.len() + 1 - } - if splice_end > 0 { - selector.drain(splice_start..(splice_end + (splice_end != selector.len()) as usize)); - } + /// Extracts the standardized `_time` argument from the selector parameters. + /// + /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
+ pub fn time_range(&self) -> ZResult> { + self.parameters().time_range() } #[cfg(any(feature = "unstable", test))] - pub(crate) fn parameter_index(&self, param_name: &str) -> ZResult> { - let starts_with_param = |s: &str| { - if let Some(rest) = s.strip_prefix(param_name) { - matches!(rest.as_bytes().first(), None | Some(b'=')) - } else { - false - } - }; - let mut acc = 0; - let mut res = None; - for chunk in self.parameters().split('&') { - if starts_with_param(chunk) { - if res.is_none() { - res = Some(acc) - } else { - bail!( - "parameter `{}` appeared multiple times in selector `{}`.", - param_name, - self - ) - } - } - acc += chunk.len() as u32 + 1; - } - Ok(res) + pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { + self.parameters_mut().set_accept_any_keyexpr(anyke); } + #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(self, any: bool) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - let mut s = self.into_owned(); - let any_selparam = s.parameter_index(_REPLY_KEY_EXPR_ANY_SEL_PARAM)?; - match (any, any_selparam) { - (true, None) => { - let s = s.parameters_mut(); - if !s.is_empty() { - s.push('&') - } - s.push_str(_REPLY_KEY_EXPR_ANY_SEL_PARAM); - } - (false, Some(index)) => { - let s = dbg!(s.parameters_mut()); - let mut start = index as usize; - let pend = start + _REPLY_KEY_EXPR_ANY_SEL_PARAM.len(); - if dbg!(start) != 0 { - start -= 1 - } - match dbg!(&s[pend..]).find('&') { - Some(end) => std::mem::drop(s.drain(start..end + pend)), - None => s.truncate(start), - } - dbg!(s); - } - _ => {} - } - Ok(s) + pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { + self.parameters().accept_any_keyexpr() } } -#[test] -fn selector_accessors() { - let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); - for selector in [ - "hello/there?_timetrick", - "hello/there?_timetrick&_time", - "hello/there?_timetrick&_time&_filter", - "hello/there?_timetrick&_time=[..]", - 
"hello/there?_timetrick&_time=[..]&_filter", - ] { - let mut selector = Selector::try_from(selector).unwrap(); - selector.with_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); - assert!(dbg!(selector.parameters()).contains("_time=[now(-2s)..now(2s)]")); - let map_selector = selector.parameters_cowmap().unwrap(); - assert_eq!( - selector.time_range().unwrap(), - map_selector.time_range().unwrap() - ); - let without_any = selector.to_string(); - let with_any = selector.to_string() + "&" + crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector.parameters_mut().push_str("&other"); - assert_eq!(selector.to_string(), with_any + "&other"); - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any + "&other"); +/// A wrapper type to help decode zenoh selector parameters. +/// +/// Most methods will return an Error if duplicates of a same parameter are found, to avoid HTTP Parameter Pollution like vulnerabilities. 
+#[repr(transparent)] +#[derive(Clone, PartialEq, Eq)] +pub struct Parameters<'a>(Properties<'a>); + +impl<'a> Deref for Parameters<'a> { + type Target = Properties<'a>; + + fn deref(&self) -> &Self::Target { + &self.0 } } -pub trait Parameter: Sized { - type Name: AsRef + Sized; - type Value: AsRef + Sized; - fn name(&self) -> &Self::Name; - fn value(&self) -> &Self::Value; - fn split(self) -> (Self::Name, Self::Value); - fn extract_name(self) -> Self::Name { - self.split().0 - } - fn extract_value(self) -> Self::Value { - self.split().1 + +impl<'a> DerefMut for Parameters<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl + Sized, V: AsRef + Sized> Parameter for (N, V) { - type Name = N; - type Value = V; - fn name(&self) -> &N { - &self.0 - } - fn value(&self) -> &V { - &self.1 + +impl std::fmt::Display for Parameters<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) } - fn split(self) -> (Self::Name, Self::Value) { - self +} + +impl std::fmt::Debug for Parameters<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self) } - fn extract_name(self) -> Self::Name { - self.0 +} + +impl<'a, T> From for Parameters<'a> +where + T: Into>, +{ + fn from(value: T) -> Self { + Parameters(value.into()) } - fn extract_value(self) -> Self::Value { - self.1 +} + +impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from(&props.0) } } -#[allow(type_alias_bounds)] -type ExtractedName<'a, VS: Parameters<'a>> = <::Item as Parameter>::Name; -#[allow(type_alias_bounds)] -type ExtractedValue<'a, VS: Parameters<'a>> = <::Item as Parameter>::Value; -/// A trait to help decode zenoh selector parameters. -/// -/// Most methods will return an Error if duplicates of a same parameter are found, to avoid HTTP Parameter Pollution like vulnerabilities. 
-pub trait Parameters<'a> { - type Decoder: Iterator + 'a; - /// Returns this selector's parameters as an iterator. - fn decode(&'a self) -> Self::Decoder - where - ::Item: Parameter; - - /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arrise. - fn decode_into_map(&'a self) -> ZResult> - where - ::Item: Parameter, - N: AsRef + std::hash::Hash + std::cmp::Eq, - ExtractedName<'a, Self>: Into, - ExtractedValue<'a, Self>: Into, - { - let mut result: HashMap = HashMap::new(); - for (name, value) in self.decode().map(Parameter::split) { - match result.entry(name.into()) { - std::collections::hash_map::Entry::Occupied(e) => { - bail!("Duplicated parameter `{}` detected", e.key().as_ref()) - } - std::collections::hash_map::Entry::Vacant(e) => { - e.insert(value.into()); - } - } - } - Ok(result) +impl From<&Parameters<'_>> for HashMap { + fn from(props: &Parameters) -> Self { + HashMap::from(&props.0) } +} - /// Extracts the requested parameters from the selector parameters. - /// - /// The default implementation is done in a single pass through the selector parameters, returning an error if any of the requested parameters are present more than once. 
- fn get_parameters( - &'a self, - names: [&str; N], - ) -> ZResult<[Option>; N]> - where - ::Item: Parameter, - { - let mut result = unsafe { - let mut result: std::mem::MaybeUninit<[Option>; N]> = - std::mem::MaybeUninit::uninit(); - for slot in result.assume_init_mut() { - std::ptr::write(slot, None); - } - result.assume_init() - }; - for pair in self.decode() { - if let Some(index) = names.iter().position(|k| *k == pair.name().as_ref()) { - let slot = &mut result[index]; - if slot.is_some() { - bail!("Duplicated parameter `{}` detected.", names[index]) - } - *slot = Some(pair.extract_value()) - } - } - Ok(result) +impl From> for HashMap { + fn from(props: Parameters) -> Self { + HashMap::from(props.0) } +} - /// Extracts the requested arguments from the selector parameters as booleans, following the Zenoh convention that if a parameter name is present and has a value different from "false", its value is truthy. - /// - /// The default implementation is done in a single pass through the selector parameters, returning an error if some of the requested parameters are present more than once. - fn get_bools(&'a self, names: [&str; N]) -> ZResult<[bool; N]> - where - ::Item: Parameter, - { - Ok(self.get_parameters(names)?.map(|v| match v { - None => false, - Some(s) => s.as_ref() != "false", - })) +impl Parameters<'_> { + /// Sets the time range targeted by the selector. + pub fn set_time_range>>(&mut self, time_range: T) { + let mut time_range: Option = time_range.into(); + match time_range.take() { + Some(tr) => self.0.insert(TIME_RANGE_KEY, format!("{}", tr)), + None => self.0.remove(TIME_RANGE_KEY), + }; } /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
- fn time_range(&'a self) -> ZResult> - where - ::Item: Parameter, - { - Ok(match &self.get_parameters([TIME_RANGE_KEY])?[0] { - Some(s) => Some(s.as_ref().parse()?), - None => None, - }) + fn time_range(&self) -> ZResult> { + match self.0.get(TIME_RANGE_KEY) { + Some(tr) => Ok(Some(tr.parse()?)), + None => Ok(None), + } } -} -impl<'a> Parameters<'a> for Selector<'a> { - type Decoder = >::Decoder; - fn decode(&'a self) -> Self::Decoder { - self.parameters().decode() + + #[cfg(any(feature = "unstable", test))] + pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + + let mut anyke: Option = anyke.into(); + match anyke.take() { + Some(ak) => { + if ak { + self.0.insert(ANYKE, "") + } else { + self.0.insert(ANYKE, "false") + } + } + None => self.0.remove(ANYKE), + }; } -} -impl<'a> Parameters<'a> for str { - type Decoder = form_urlencoded::Parse<'a>; - fn decode(&'a self) -> Self::Decoder { - form_urlencoded::parse(self.as_bytes()) + + #[cfg(any(feature = "unstable", test))] + pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + + match self.0.get(ANYKE) { + Some(ak) => Ok(Some(ak.parse()?)), + None => Ok(None), + } } } +#[test] +fn selector_accessors() { + let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); + for selector in [ + "hello/there?_timetrick", + "hello/there?_timetrick;_time", + "hello/there?_timetrick;_time;_filter", + "hello/there?_timetrick;_time=[..]", + "hello/there?_timetrick;_time=[..];_filter", + ] { + let mut selector = Selector::try_from(selector).unwrap(); + println!("Parameters start: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + + assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); + + selector.set_time_range(time_range); + assert_eq!(selector.time_range().unwrap().unwrap(), time_range); + 
assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); -impl<'a, K: Borrow + Hash + Eq + 'a, V: Borrow + 'a> Parameters<'a> for HashMap { - type Decoder = std::collections::hash_map::Iter<'a, K, V>; - fn decode(&'a self) -> Self::Decoder { - self.iter() - } - fn get_parameters( - &'a self, - names: [&str; N], - ) -> ZResult<[Option>; N]> - where - ::Item: Parameter, - { - // `Ok(names.map(|key| self.get(key)))` would be very slightly faster, but doesn't compile for some reason :( - Ok(names.map(|key| self.get_key_value(key).map(|kv| kv.extract_value()))) + let hm: HashMap = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + selector.parameters_mut().insert("_filter", ""); + assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + + selector.set_accept_any_keyexpr(true); + + println!("Parameters end: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + assert_eq!( + &format!("{}", selector), + "hello/there?_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" + ); } } @@ -448,7 +287,7 @@ impl std::fmt::Display for Selector<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}", self.key_expr)?; if !self.parameters.is_empty() { - write!(f, "?{}", self.parameters)?; + write!(f, "?{}", self.parameters.as_str())?; } Ok(()) } @@ -504,7 +343,7 @@ impl<'a> From<&'a Query> for Selector<'a> { fn from(q: &'a Query) -> Self { Selector { key_expr: q.inner.key_expr.clone(), - parameters: (&q.inner.parameters).into(), + parameters: q.inner.parameters.clone(), } } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ca5d44c3a6..beaecba314 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -24,8 +24,8 @@ use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use 
crate::net::runtime::Runtime; use crate::payload::Payload; +use crate::prelude::KeyExpr; use crate::prelude::Locality; -use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; @@ -1621,7 +1621,7 @@ impl Session { let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { ConsolidationMode::Auto => { - if selector.decode().any(|(k, _)| k.as_ref() == TIME_RANGE_KEY) { + if selector.parameters().contains_key(TIME_RANGE_KEY) { ConsolidationMode::None } else { ConsolidationMode::Latest @@ -1728,7 +1728,7 @@ impl Session { self.handle_query( true, &wexpr, - selector.parameters(), + selector.parameters().as_str(), qid, target, consolidation, @@ -1797,13 +1797,11 @@ impl Session { } }; - let parameters = parameters.to_owned(); - let zid = self.runtime.zid(); let query_inner = Arc::new(QueryInner { key_expr, - parameters, + parameters: parameters.to_owned().into(), value: body.map(|b| Value { payload: b.payload.into(), encoding: b.encoding.into(), @@ -2189,13 +2187,8 @@ impl Primitives for Session { }; match state.queries.get_mut(&msg.rid) { Some(query) => { - if !matches!( - query - .selector - .parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), - Ok([true]) - ) && !query.selector.key_expr.intersects(&key_expr) + if !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))) + && !query.selector.key_expr.intersects(&key_expr) { log::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 267bb5c284..77850b7c7c 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -148,7 +148,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re .declare_queryable(key_expr) .callback(move |query| { c_msgs.fetch_add(1, Ordering::Relaxed); - match query.parameters() { + match query.parameters().as_str() { "ok_put" => { 
tokio::task::block_in_place(|| { tokio::runtime::Handle::current().block_on(async { From f181b435bd3a7d333d996177e0d1b0bcf1991545 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 22:13:46 +0200 Subject: [PATCH 208/598] Fix import --- commons/zenoh-collections/src/parameters.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 536f2beb2a..0d6e051397 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -15,6 +15,8 @@ pub const LIST_SEPARATOR: char = ';'; pub const FIELD_SEPARATOR: char = '='; pub const VALUE_SEPARATOR: char = '|'; +use alloc::{string::String, vec::Vec}; + fn split_once(s: &str, c: char) -> (&str, &str) { match s.find(c) { Some(index) => { From e7fbbf3956e97b1d18df8584e52c5ec166e2f12c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 22:31:36 +0200 Subject: [PATCH 209/598] Payload Cow<'static, _> deserializer --- zenoh/src/payload.rs | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 11a6f0c360..35d9ea6e58 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -543,6 +543,15 @@ impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { } } +impl From for Cow<'static, [u8]> { + fn from(v: Payload) -> Self { + match v.0.contiguous() { + Cow::Borrowed(s) => Cow::Owned(s.to_vec()), + Cow::Owned(s) => Cow::Owned(s), + } + } +} + impl<'a> From<&'a Payload> for Cow<'a, [u8]> { fn from(value: &'a Payload) -> Self { ZSerde.deserialize(value).unwrap_infallible() @@ -650,7 +659,15 @@ impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = Utf8Error; fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { - let v: Cow<[u8]> = Self.deserialize(v).unwrap_infallible(); + Cow::try_from(v) + } +} + +impl TryFrom for Cow<'static, str> { + type Error = Utf8Error; + + 
fn try_from(v: Payload) -> Result { + let v: Cow<'static, [u8]> = Cow::from(v); let _ = core::str::from_utf8(v.as_ref())?; // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. @@ -661,8 +678,12 @@ impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { type Error = Utf8Error; - fn try_from(value: &'a Payload) -> Result { - ZSerde.deserialize(value) + fn try_from(v: &'a Payload) -> Result { + let v: Cow<'a, [u8]> = Cow::from(v); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. + Ok(unsafe { core::mem::transmute(v) }) } } From 27b7e06a04927cecc49e345c2b5089ec3bb40c63 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 22:49:22 +0200 Subject: [PATCH 210/598] Impl Payload Serialize/Deserialize for Properties --- commons/zenoh-collections/src/properties.rs | 9 +++ zenoh/src/payload.rs | 63 +++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 026dd69f72..6b6f1de908 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -126,6 +126,15 @@ impl From for Properties<'_> { } } +impl<'s> From> for Properties<'s> { + fn from(value: Cow<'s, str>) -> Self { + match value { + Cow::Borrowed(s) => Properties::from(s), + Cow::Owned(s) => Properties::from(s), + } + } +} + impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> where K: AsRef + 's, diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 35d9ea6e58..3f9fed1e90 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -28,6 +28,7 @@ use zenoh_buffers::{ ZBufReader, ZSlice, }; use zenoh_codec::{RCodec, WCodec, 
Zenoh080}; +use zenoh_collections::Properties; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -842,6 +843,63 @@ impl TryFrom<&Payload> for bool { } // - Zenoh advanced types encoders/decoders +// Properties +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl From> for Payload { + fn from(t: Properties<'_>) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Properties<'_>> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s Properties<'s>> for Payload { + fn from(t: &'s Properties<'s>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &'s Payload) -> Result, Self::Error> { + let s = v + .deserialize::>() + .map_err(|_| ZDeserializeError)?; + Ok(Properties::from(s)) + } +} + +impl TryFrom for Properties<'static> { + type Error = ZDeserializeError; + + fn try_from(v: Payload) -> Result { + let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; + Ok(Properties::from(s.into_owned())) + } +} + +impl<'s> TryFrom<&'s Payload> for Properties<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s Payload) -> Result { + ZSerde.deserialize(value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; @@ -1313,6 +1371,7 @@ mod tests { use rand::Rng; use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; + use zenoh_collections::Properties; const NUM: usize = 1_000; @@ -1405,6 +1464,10 @@ mod tests { serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + // Properties + serialize_deserialize!(Properties, Properties::from("")); + serialize_deserialize!(Properties, Properties::from("a=1;b=2;c3")); + // Tuple 
serialize_deserialize!((usize, usize), (0, 1)); serialize_deserialize!((usize, String), (0, String::from("a"))); From 0f4477df69349c8c994bdf2425d8a12787d47a3b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 01:17:08 +0200 Subject: [PATCH 211/598] Fix paramters concat bug --- commons/zenoh-collections/src/parameters.rs | 30 ++++++++++++--------- commons/zenoh-collections/src/properties.rs | 1 - 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 0d6e051397..9bb6eb4148 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -32,13 +32,8 @@ pub struct Parameters; impl Parameters { pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR).filter_map(|prop| { - if prop.is_empty() { - None - } else { - Some(split_once(prop, FIELD_SEPARATOR)) - } - }) + s.split(LIST_SEPARATOR) + .filter_map(|prop| (!prop.is_empty()).then(|| split_once(prop, FIELD_SEPARATOR))) } #[allow(clippy::should_implement_trait)] @@ -55,13 +50,22 @@ impl Parameters { where I: Iterator, { - let mut from = iter - .filter(|(k, _)| !k.is_empty()) - .collect::>(); - from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); + let mut from = iter.collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); Self::concat_into(from.iter().copied(), into); } + pub fn from_slice_mut(slice: &mut [(&str, &str)]) -> String { + let mut into = String::new(); + Self::from_slice_mut_into(slice, &mut into); + into + } + + pub fn from_slice_mut_into(slice: &mut [(&str, &str)], into: &mut String) { + slice.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + Self::concat_into(slice.iter().copied(), into); + } + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { Self::iter(s) .find(|(key, _)| *key == k) @@ -147,8 +151,8 @@ impl Parameters { where I: Iterator, { - let mut first = into.is_empty(); - for (k, v) in iter 
{ + let mut first = true; + for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { if !first { into.push(LIST_SEPARATOR); } diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 6b6f1de908..af881a94a6 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -82,7 +82,6 @@ impl Properties<'_> { pub fn extend<'s, I, K, V>(&mut self, iter: I) where I: IntoIterator, - // I::Item: std::borrow::Borrow<(K, V)>, K: AsRef + 's, V: AsRef + 's, { From e91c9978bdd87f2db3e6d4520d3c3a28774d2d07 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 01:49:22 +0200 Subject: [PATCH 212/598] Properties use borrow trait --- commons/zenoh-collections/src/properties.rs | 47 +++++++++++++++------ 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index af881a94a6..9b0619223c 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -82,12 +82,12 @@ impl Properties<'_> { pub fn extend<'s, I, K, V>(&mut self, iter: I) where I: IntoIterator, - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { self.0 = Cow::Owned(Parameters::extend( Parameters::iter(self.as_str()), - iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref())), + iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow())), )); } @@ -136,30 +136,30 @@ impl<'s> From> for Properties<'s> { impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> where - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } impl<'s, K, V> FromIterator<&'s (K, V)> for Properties<'_> where - K: AsRef + 
's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } impl<'s, K, V> From<&'s [(K, V)]> for Properties<'_> where - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { fn from(value: &'s [(K, V)]) -> Self { Self::from_iter(value.iter()) @@ -169,8 +169,8 @@ where #[cfg(feature = "std")] impl From> for Properties<'_> where - K: AsRef, - V: AsRef, + K: Borrow, + V: Borrow, { fn from(map: HashMap) -> Self { Self::from_iter(map.iter()) @@ -193,6 +193,15 @@ impl From<&Properties<'_>> for HashMap { } } +#[cfg(feature = "std")] +impl<'s> From<&'s Properties<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s Properties<'s>) -> Self { + HashMap::from_iter( + Parameters::iter(props.as_str()).map(|(k, v)| (Cow::from(k), Cow::from(v))), + ) + } +} + #[cfg(feature = "std")] impl From> for HashMap { fn from(props: Properties) -> Self { @@ -251,5 +260,17 @@ mod tests { Properties::from("p1=x=y;p2=a==b"), Properties::from(&[("p1", "x=y"), ("p2", "a==b")][..]) ); + + let mut hm: HashMap = HashMap::new(); + hm.insert("p1".to_string(), "v1".to_string()); + assert_eq!(Properties::from(hm), Properties::from("p1=v1")); + + let mut hm: HashMap<&str, &str> = HashMap::new(); + hm.insert("p1", "v1"); + assert_eq!(Properties::from(hm), Properties::from("p1=v1")); + + let mut hm: HashMap, Cow> = HashMap::new(); + hm.insert(Cow::from("p1"), Cow::from("v1")); + assert_eq!(Properties::from(hm), Properties::from("p1=v1")); } } From 149ab064bfc1a6446d4ddccb03c137d6b390442f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 01:51:43 +0200 Subject: [PATCH 213/598] Fix clippy --- commons/zenoh-collections/src/parameters.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 9bb6eb4148..e86dfa2623 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -33,7 +33,8 @@ pub struct Parameters; impl Parameters { pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR) - .filter_map(|prop| (!prop.is_empty()).then(|| split_once(prop, FIELD_SEPARATOR))) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) } #[allow(clippy::should_implement_trait)] From 14a203676492da6f8d9910337d9d4f39cf1ba8b5 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Sat, 13 Apr 2024 02:00:45 +0200 Subject: [PATCH 214/598] make Selector::key_expr pub(crate) and add corresponding accessor (#928) --- .../src/queryable_get/bin/z_queryable_get.rs | 2 +- examples/examples/z_storage.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 4 ++-- plugins/zenoh-plugin-storage-manager/src/lib.rs | 8 ++++---- zenoh-ext/src/publication_cache.rs | 6 +++--- zenoh/src/selector.rs | 8 ++++++-- 7 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 84c3a82f88..a5111c11e3 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -33,7 +33,7 @@ async fn main() { zenoh_runtime::ZRuntime::Application.block_in_place(async move { query .reply( - query.selector().key_expr, + query.selector().key_expr(), query.value().unwrap().payload().clone(), ) .res() diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index cb2f40c125..8ae8c4c678 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -60,7 +60,7 @@ async fn main() { let query = 
query.unwrap(); println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { - if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { + if query.selector().key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); } } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index ad254278e3..90b88e8095 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -174,7 +174,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { let query = query.unwrap(); info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { - if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { + if query.selector().key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { query.reply_sample(sample.clone()).res().await.unwrap(); } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6edcfdb945..1068d07163 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -274,7 +274,7 @@ impl RunningPluginTrait for RunningPlugin { with_extended_string(&mut key, &["/version"], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -285,7 +285,7 @@ impl RunningPluginTrait for RunningPlugin { with_extended_string(&mut key, &["/port"], |port_key| { if keyexpr::new(port_key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( port_key.clone(), diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 
91df2f108d..6aa0a09f9a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -304,7 +304,7 @@ impl RunningPluginTrait for StorageRuntime { with_extended_string(&mut key, &["/version"], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -319,7 +319,7 @@ impl RunningPluginTrait for StorageRuntime { with_extended_string(key, &["/__path__"], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -329,7 +329,7 @@ impl RunningPluginTrait for StorageRuntime { }); if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -345,7 +345,7 @@ impl RunningPluginTrait for StorageRuntime { with_extended_string(key, &[storage], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { if let Ok(value) = task::block_on(async { let (tx, rx) = async_std::channel::bounded(1); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 9f2b645da9..bbc90c0e8f 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -205,8 +205,8 @@ impl<'a> PublicationCache<'a> { // on query, reply with cach content query = quer_recv.recv_async() => { if let Ok(query) = query { - if !query.selector().key_expr.as_str().contains('*') { - if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { + if !query.selector().key_expr().as_str().contains('*') { + if let Some(queue) = cache.get(query.selector().key_expr().as_keyexpr()) { for sample in queue { if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), 
sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ @@ -220,7 +220,7 @@ impl<'a> PublicationCache<'a> { } } else { for (key_expr, queue) in cache.iter() { - if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { + if query.selector().key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..26f9e09c57 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -65,13 +65,17 @@ use std::{ #[derive(Clone, PartialEq, Eq)] pub struct Selector<'a> { /// The part of this selector identifying which keys should be part of the selection. - pub key_expr: KeyExpr<'a>, - /// the part of this selector identifying which values should be part of the selection. + pub(crate) key_expr: KeyExpr<'a>, + /// The part of this selector identifying which values should be part of the selection. pub(crate) parameters: Cow<'a, str>, } pub const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { + /// Gets the key-expression. + pub fn key_expr(&'a self) -> &KeyExpr<'a> { + &self.key_expr + } /// Gets the parameters as a raw string. 
pub fn parameters(&self) -> &str { &self.parameters From 94b62f5fc86280d6ac28792837fbaca437de4810 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 14:45:42 +0200 Subject: [PATCH 215/598] Sorted parameters --- commons/zenoh-collections/src/parameters.rs | 122 +++++++++++++----- commons/zenoh-collections/src/properties.rs | 44 +++---- commons/zenoh-protocol/src/core/endpoint.rs | 94 +++++++++----- io/zenoh-transport/src/multicast/manager.rs | 2 +- io/zenoh-transport/src/unicast/manager.rs | 4 +- io/zenoh-transport/tests/endpoints.rs | 8 +- .../tests/unicast_authenticator.rs | 8 +- io/zenoh-transport/tests/unicast_multilink.rs | 8 +- io/zenoh-transport/tests/unicast_openclose.rs | 8 +- io/zenoh-transport/tests/unicast_time.rs | 8 +- io/zenoh-transport/tests/unicast_transport.rs | 34 +++-- zenoh/src/selector.rs | 93 +++++++------ 12 files changed, 252 insertions(+), 181 deletions(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index e86dfa2623..b49ee1a1f9 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -28,15 +28,10 @@ fn split_once(s: &str, c: char) -> (&str, &str) { } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -pub struct Parameters; - -impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) - } +/// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. 
+pub struct SortedParameters; +impl SortedParameters { #[allow(clippy::should_implement_trait)] pub fn from_iter<'s, I>(iter: I) -> String where @@ -53,18 +48,85 @@ impl Parameters { { let mut from = iter.collect::>(); from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Self::concat_into(from.iter().copied(), into); + Parameters::from_iter_into(from.iter().copied(), into); + } + + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + where + I: Iterator + Clone, + { + let mut ic = iter.clone(); + let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); + + let current = iter.filter(|x| x.0 != k); + let new = Some((k, v)).into_iter(); + let iter = current.chain(new); + (SortedParameters::from_iter(iter), item) + } + + pub fn join<'s, C, N>(current: C, new: N) -> String + where + C: Iterator + Clone, + N: Iterator + Clone, + { + let mut into = String::new(); + SortedParameters::join_into(current, new, &mut into); + into + } + + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + where + C: Iterator + Clone, + N: Iterator + Clone, + { + let n = new.clone(); + let current = current + .clone() + .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + let iter = current.chain(new); + SortedParameters::from_iter_into(iter, into); + } + + pub fn is_sorted<'s, I>(iter: I) -> bool + where + I: Iterator, + { + let mut prev = None; + for (k, _) in iter { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } +} + +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. 
+pub struct Parameters; + +impl Parameters { + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + s.split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) } - pub fn from_slice_mut(slice: &mut [(&str, &str)]) -> String { + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String + where + I: Iterator, + { let mut into = String::new(); - Self::from_slice_mut_into(slice, &mut into); + Self::from_iter_into(iter, &mut into); into } - pub fn from_slice_mut_into(slice: &mut [(&str, &str)], into: &mut String) { - slice.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Self::concat_into(slice.iter().copied(), into); + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + Self::concat_into(iter, into); } pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { @@ -106,39 +168,29 @@ impl Parameters { (Parameters::concat(iter), item) } - pub fn extend<'s, C, N>(current: C, new: N) -> String + pub fn join<'s, C, N>(current: C, new: N) -> String where - C: Iterator, - N: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { let mut into = String::new(); - Parameters::extend_into(current, new, &mut into); + Parameters::join_into(current, new, &mut into); into } - pub fn extend_into<'s, C, N>(current: C, new: N, into: &mut String) + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where - C: Iterator, - N: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { + let n = new.clone(); + let current = current + .clone() + .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); let iter = current.chain(new); Parameters::from_iter_into(iter, into); } - pub fn is_sorted<'s, I>(iter: I) -> bool - where - I: Iterator, - { - let mut prev = None; - for (k, _) in iter { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } - fn concat<'s, I>(iter: I) -> String where I: Iterator, diff --git 
a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 9b0619223c..800d2ed9dc 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -79,15 +79,15 @@ impl Properties<'_> { removed } - pub fn extend<'s, I, K, V>(&mut self, iter: I) + pub fn join<'s, I, K, V>(&mut self, iter: I) where - I: IntoIterator, - K: Borrow + 's, - V: Borrow + 's, + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { - self.0 = Cow::Owned(Parameters::extend( + self.0 = Cow::Owned(Parameters::join( Parameters::iter(self.as_str()), - iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow())), + iter.map(|(k, v)| (k.borrow(), v.borrow())), )); } @@ -98,30 +98,20 @@ impl Properties<'_> { impl<'s> From<&'s str> for Properties<'s> { fn from(mut value: &'s str) -> Self { - if Parameters::is_sorted(Parameters::iter(value)) { - value = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - Self(Cow::Borrowed(value)) - } else { - Self(Cow::Owned(Parameters::from_iter(Parameters::iter(value)))) - } + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + Self(Cow::Borrowed(value)) } } impl From for Properties<'_> { fn from(mut value: String) -> Self { - if Parameters::is_sorted(Parameters::iter(value.as_str())) { - let s = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - value.truncate(s.len()); - Self(Cow::Owned(value)) - } else { - Self(Cow::Owned(Parameters::from_iter(Parameters::iter( - value.as_str(), - )))) - } + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); + Self(Cow::Owned(value)) } } @@ -136,8 +126,8 @@ impl<'s> From> for Properties<'s> { impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> where - K: 
Borrow + 's, - V: Borrow + 's, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { fn from_iter>(iter: T) -> Self { let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 03678fb675..debe7da7b5 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -13,8 +13,8 @@ // use super::locator::*; use alloc::{borrow::ToOwned, format, string::String}; -use core::{convert::TryFrom, fmt, str::FromStr}; -use zenoh_collections::Parameters; +use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; +use zenoh_collections::{Parameters, SortedParameters}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; // Parsing chars @@ -240,25 +240,35 @@ impl<'a> MetadataMut<'a> { } impl MetadataMut<'_> { - pub fn extend(&mut self, iter: I) -> ZResult<()> + pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where - I: Iterator, - K: AsRef, - V: AsRef, + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { - for (k, v) in iter { - let k: &str = k.as_ref(); - let v: &str = v.as_ref(); - self.insert(k, v)? 
- } + let ep = EndPoint::new( + self.0.protocol(), + self.0.address(), + SortedParameters::join( + self.0.metadata().iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + ), + self.0.config(), + )?; + + self.0.inner = ep.inner; Ok(()) } - pub fn insert(&mut self, k: &str, v: &str) -> ZResult<()> { + pub fn insert(&mut self, k: K, v: V) -> ZResult<()> + where + K: Borrow, + V: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::insert(self.0.metadata().iter(), k, v).0, + SortedParameters::insert(self.0.metadata().iter(), k.borrow(), v.borrow()).0, self.0.config(), )?; @@ -266,11 +276,14 @@ impl MetadataMut<'_> { Ok(()) } - pub fn remove(&mut self, k: &str) -> ZResult<()> { + pub fn remove(&mut self, k: K) -> ZResult<()> + where + K: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().iter(), k).0, + Parameters::remove(self.0.metadata().iter(), k.borrow()).0, self.0.config(), )?; @@ -357,38 +370,51 @@ impl<'a> ConfigMut<'a> { } impl ConfigMut<'_> { - pub fn extend(&mut self, iter: I) -> ZResult<()> + pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where - I: Iterator, - K: AsRef, - V: AsRef, + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { - for (k, v) in iter { - let k: &str = k.as_ref(); - let v: &str = v.as_ref(); - self.insert(k, v)? 
- } + let ep = EndPoint::new( + self.0.protocol(), + self.0.address(), + self.0.metadata(), + SortedParameters::join( + self.0.config().iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + ), + )?; + + self.0.inner = ep.inner; Ok(()) } - pub fn insert(&mut self, k: &str, v: &str) -> ZResult<()> { + pub fn insert(&mut self, k: K, v: V) -> ZResult<()> + where + K: Borrow, + V: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::insert(self.0.config().iter(), k, v).0, + SortedParameters::insert(self.0.config().iter(), k.borrow(), v.borrow()).0, )?; self.0.inner = ep.inner; Ok(()) } - pub fn remove(&mut self, k: &str) -> ZResult<()> { + pub fn remove(&mut self, k: K) -> ZResult<()> + where + K: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().iter(), k).0, + Parameters::remove(self.0.config().iter(), k.borrow()).0, )?; self.0.inner = ep.inner; @@ -550,14 +576,14 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -570,10 +596,10 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - 
Parameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); inner.push(CONFIG_SEPARATOR); - Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } @@ -792,14 +818,14 @@ fn endpoints() { let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .metadata_mut() - .extend([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) + .join([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447?a=1;b=2;c=3"); let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .config_mut() - .extend([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) + .join([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447#A=1;B=2;C=3"); diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index daf49ce9a3..a6f682edc9 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -260,7 +260,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().extend(Parameters::iter(config))?; + endpoint.config_mut().join(Parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index eb0339c35b..0fdce265f9 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -380,7 +380,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - 
endpoint.config_mut().extend(Parameters::iter(config))?; + endpoint.config_mut().join(Parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -689,7 +689,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().extend(Parameters::iter(config))?; + endpoint.config_mut().join(Parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index 13a605a588..def493e88f 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -317,13 +317,13 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("tls/localhost:{}", 7070).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -396,13 +396,13 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("quic/localhost:{}", 7080).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); let endpoints = vec![endpoint]; diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index d94ade1ce1..63f1c785b7 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -802,14 +802,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 8030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), 
(TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -902,14 +902,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 8040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 5e4499be2a..54a31f62c3 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -611,14 +611,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -709,14 +709,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 56e4a1b140..3f57ebfd62 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -639,14 +639,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - 
.extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -737,14 +737,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 75d3ae1d98..668df34cd6 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -398,14 +398,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -497,14 +497,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index c0af98eb46..2fffb2f811 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -994,14 +994,14 @@ async fn transport_unicast_tls_only_server() { let mut endpoint: EndPoint = format!("tls/localhost:{}", 
16070).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1039,14 +1039,14 @@ async fn transport_unicast_quic_only_server() { let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1087,7 +1087,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut client_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); client_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_CLIENT_CERTIFICATE_RAW, CLIENT_CERT), @@ -1095,7 +1095,7 @@ async fn transport_unicast_tls_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1103,7 +1103,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut server_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); server_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1111,7 +1111,7 @@ async fn transport_unicast_tls_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control @@ -1157,18 +1157,14 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); client_endpoint .config_mut() - .extend( - [(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)] - .iter() 
- .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), - ) + .join([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) .unwrap(); // Define the locator let mut server_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); server_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1176,7 +1172,7 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { (TLS_CLIENT_AUTH, "true"), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control @@ -1227,7 +1223,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); client_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has @@ -1239,7 +1235,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1247,7 +1243,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut server_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); server_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1255,7 +1251,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 7d5326638e..51ee72f98a 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -233,49 +233,6 @@ impl Parameters<'_> { } } } -#[test] -fn selector_accessors() { - 
let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); - for selector in [ - "hello/there?_timetrick", - "hello/there?_timetrick;_time", - "hello/there?_timetrick;_time;_filter", - "hello/there?_timetrick;_time=[..]", - "hello/there?_timetrick;_time=[..];_filter", - ] { - let mut selector = Selector::try_from(selector).unwrap(); - println!("Parameters start: {}", selector.parameters()); - for i in selector.parameters().iter() { - println!("\t{:?}", i); - } - - assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); - - selector.set_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); - assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); - - let hm: HashMap = HashMap::from(selector.parameters()); - assert!(hm.contains_key(TIME_RANGE_KEY)); - - let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); - assert!(hm.contains_key(TIME_RANGE_KEY)); - - selector.parameters_mut().insert("_filter", ""); - assert_eq!(selector.parameters().get("_filter").unwrap(), ""); - - selector.set_accept_any_keyexpr(true); - - println!("Parameters end: {}", selector.parameters()); - for i in selector.parameters().iter() { - println!("\t{:?}", i); - } - assert_eq!( - &format!("{}", selector), - "hello/there?_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" - ); - } -} impl std::fmt::Debug for Selector<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -392,3 +349,53 @@ impl<'a> From> for Selector<'a> { } } } + +#[test] +fn selector_accessors() { + let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); + for selector in [ + "hello/there?_timetrick", + "hello/there?_timetrick;_time", + "hello/there?_timetrick;_time;_filter", + "hello/there?_timetrick;_time=[..]", + "hello/there?_timetrick;_time=[..];_filter", + ] { + let mut selector = Selector::try_from(selector).unwrap(); + println!("Parameters start: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", 
i); + } + + assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); + + selector.set_time_range(time_range); + assert_eq!(selector.time_range().unwrap().unwrap(), time_range); + assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); + + let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + selector.parameters_mut().insert("_filter", ""); + assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + + let hm: HashMap = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + selector.parameters_mut().join(hm.iter()); + assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + + selector.set_accept_any_keyexpr(true); + + println!("Parameters end: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + + assert_eq!( + HashMap::::from(selector.parameters()), + HashMap::::from(Parameters::from( + "_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" + )) + ); + } +} From f79033d596eddad3fd2e5dbc61d3843998bbf1ba Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 15:57:03 +0200 Subject: [PATCH 216/598] Fix conditional feature --- zenoh/src/session.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index beaecba314..e8b6660b9b 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2187,9 +2187,12 @@ impl Primitives for Session { }; match state.queries.get_mut(&msg.rid) { Some(query) => { - if !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))) - && !query.selector.key_expr.intersects(&key_expr) - { + let c = zcondfeat!( + "unstable", + !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))), + true + ); + if c && !query.selector.key_expr.intersects(&key_expr) { log::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", key_expr, From 
f77e1b7aca2c657142986d19695491bf861f7b3d Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 16:26:02 +0200 Subject: [PATCH 217/598] Fix conditional feature --- zenoh/src/queryable.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 36a2c51ba0..26be68ac8f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -220,6 +220,7 @@ impl Query { } }) } + #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { Ok(self.parameters().accept_any_keyexpr()?.unwrap_or(false)) } @@ -407,9 +408,12 @@ impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { impl Query { fn _reply_sample(&self, sample: Sample) -> ZResult<()> { - if !self._accepts_any_replies().unwrap_or(false) - && !self.key_expr().intersects(&sample.key_expr) - { + let c = zcondfeat!( + "unstable", + !self._accepts_any_replies().unwrap_or(false), + true + ); + if c && !self.key_expr().intersects(&sample.key_expr) { bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) } #[cfg(not(feature = "unstable"))] From 418ca2be575969a1e0a4b02da1ee1c0cb9cedef2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 14 Apr 2024 18:09:53 +0200 Subject: [PATCH 218/598] errno added --- zenoh/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c0840c8829..65048a2d37 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -121,6 +121,7 @@ pub mod core { pub use zenoh_result::Error; /// A zenoh result. 
pub use zenoh_result::ZResult as Result; + pub use zenoh_util::core::zresult::ErrNo; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -348,10 +349,10 @@ pub mod internal { pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; + pub use zenoh_task::TerminatableTask; pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; - pub use zenoh_task::TerminatableTask; } #[cfg(feature = "shared-memory")] From f321cda42672885176ace0e58ddb749066cf72da Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 15 Apr 2024 12:55:50 +0200 Subject: [PATCH 219/598] compilation fixes --- zenoh/src/api/builders/publication.rs | 5 +- zenoh/src/api/builders/sample.rs | 3 +- zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/api/sample.rs | 10 +- zenoh/src/api/scouting.rs | 3 +- zenoh/src/api/value.rs | 2 +- zenoh/src/lib.rs | 143 +------------------------- 8 files changed, 16 insertions(+), 152 deletions(-) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index b6ebb0bad2..9a95317488 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -16,6 +16,8 @@ use std::future::Ready; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; +#[cfg(feature = "unstable")] +use crate::api::payload::OptionPayload; use crate::api::publication::Priority; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; @@ -156,7 +158,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: 
attachment.into(), ..self diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 736e982b1c..55a028f687 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -26,8 +26,7 @@ use crate::api::sample::Sample; use crate::api::sample::SampleKind; use crate::api::value::Value; #[cfg(feature = "unstable")] -use crate::{payload::OptionPayload, sample::SourceInfo}; -use std::marker::PhantomData; +use crate::{api::payload::OptionPayload, sample::SourceInfo}; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d2a6d822e4..4152b01283 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -33,6 +33,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use super::{ builders::sample::SampleBuilderTrait, + payload::OptionPayload, sample::{Attachment, SourceInfo}, }; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 48e6955097..c8c344b074 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -40,6 +40,7 @@ use zenoh_result::ZResult; use { super::{ builders::sample::SampleBuilderTrait, + payload::OptionPayload, query::ReplyKeyExpr, sample::{Attachment, SourceInfo}, }, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 26ff586c59..d9ffb363dd 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,12 +13,8 @@ // use super::{ - builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, - encoding::Encoding, - key_expr::KeyExpr, - payload::Payload, - publication::Priority, - value::Value, + builders::sample::QoSBuilderTrait, encoding::Encoding, key_expr::KeyExpr, payload::Payload, + publication::Priority, value::Value, }; use std::{convert::TryFrom, fmt}; use zenoh_protocol::{ @@ -27,7 +23,7 @@ use zenoh_protocol::{ }; #[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +pub 
use attachment::Attachment; #[zenoh_macros::unstable] use serde::Serialize; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 058ab82058..5aa456ae29 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -373,6 +373,7 @@ where ScoutBuilder { what: what.into(), config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } + diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 06d923b7d0..9938015d78 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -13,7 +13,7 @@ // //! Value primitives. -use super::{builders::sample::ValueBuilderTrait, encoding::Encoding, payload::Payload}; +use super::{encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index eb51b22dbd..2607a0d2db 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -207,13 +207,13 @@ pub mod sample { pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] + pub use crate::api::sample::Attachment; + #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; - #[zenoh_macros::unstable] - pub use crate::api::sample::{Attachment, AttachmentBuilder, AttachmentIterator}; } /// Value primitives @@ -290,7 +290,7 @@ pub mod handlers { pub use crate::api::handlers::locked; pub use crate::api::handlers::DefaultHandler; pub use crate::api::handlers::IntoHandler; - pub use crate::api::handlers::RingBuffer; + pub use crate::api::handlers::RingChannel; } /// Scouting primitives @@ -317,143 +317,6 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } -/// Scouting 
primitives. -pub mod scouting; - -/// Scout for routers and/or peers. -/// -/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. -/// -/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. -/// -/// # Arguments -/// -/// * `what` - The kind of zenoh process to scout for -/// * `config` - The configuration [`Config`] to use for scouting -/// -/// # Examples -/// ```no_run -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; -/// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() -/// .await -/// .unwrap(); -/// while let Ok(hello) = receiver.recv_async().await { -/// println!("{}", hello); -/// } -/// # } -/// ``` -pub fn scout, TryIntoConfig>( - what: I, - config: TryIntoConfig, -) -> ScoutBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: - Into, -{ - ScoutBuilder { - what: what.into(), - config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler::default(), - } -} - -/// Open a zenoh [`Session`]. 
-/// -/// # Arguments -/// -/// * `config` - The [`Config`] for the zenoh session -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; -/// -/// let mut config = config::peer(); -/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); -/// -/// let session = zenoh::open(config).res().await.unwrap(); -/// # } -/// ``` -pub fn open(config: TryIntoConfig) -> OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - OpenBuilder { config } -} - -/// A builder returned by [`open`] used to open a zenoh [`Session`]. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - config: TryIntoConfig, -} - -impl Resolvable for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type To = ZResult; -} - -impl SyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - fn res_sync(self) -> ::To { - let config: crate::config::Config = self - .config - .try_into() - .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() - } -} - -impl AsyncResolve for 
OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. #[doc(hidden)] From ecfc6c506b55fcde5cb91b22444cbb4a49c8d252 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 15 Apr 2024 12:57:25 +0200 Subject: [PATCH 220/598] compilation fixes --- zenoh/src/handlers/mod.rs | 2 +- zenoh/src/handlers/ring.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/handlers/mod.rs index 627c166795..289af7f1cc 100644 --- a/zenoh/src/handlers/mod.rs +++ b/zenoh/src/handlers/mod.rs @@ -21,7 +21,7 @@ pub use callback::*; pub use fifo::*; pub use ring::*; -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; /// An alias for `Arc`. pub type Dyn = std::sync::Arc; diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/handlers/ring.rs index 341a3efadd..36e4fb53a0 100644 --- a/zenoh/src/handlers/ring.rs +++ b/zenoh/src/handlers/ring.rs @@ -13,7 +13,7 @@ // //! Callback handler trait. 
-use crate::API_DATA_RECEPTION_CHANNEL_SIZE; +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; use super::{callback::Callback, Dyn, IntoHandler}; use std::sync::{Arc, Weak}; From c36a0fa1279b8b94190ae726132d3cdfa3a78adb Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 15 Apr 2024 12:58:31 +0200 Subject: [PATCH 221/598] compilation fixes --- zenoh/src/{ => api}/handlers/callback.rs | 0 zenoh/src/{ => api}/handlers/fifo.rs | 0 zenoh/src/{ => api}/handlers/mod.rs | 0 zenoh/src/{ => api}/handlers/ring.rs | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/handlers/callback.rs (100%) rename zenoh/src/{ => api}/handlers/fifo.rs (100%) rename zenoh/src/{ => api}/handlers/mod.rs (100%) rename zenoh/src/{ => api}/handlers/ring.rs (100%) diff --git a/zenoh/src/handlers/callback.rs b/zenoh/src/api/handlers/callback.rs similarity index 100% rename from zenoh/src/handlers/callback.rs rename to zenoh/src/api/handlers/callback.rs diff --git a/zenoh/src/handlers/fifo.rs b/zenoh/src/api/handlers/fifo.rs similarity index 100% rename from zenoh/src/handlers/fifo.rs rename to zenoh/src/api/handlers/fifo.rs diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/api/handlers/mod.rs similarity index 100% rename from zenoh/src/handlers/mod.rs rename to zenoh/src/api/handlers/mod.rs diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs similarity index 100% rename from zenoh/src/handlers/ring.rs rename to zenoh/src/api/handlers/ring.rs From ae4c4934f4df9b2e305153594d3f6ee5ad0437ac Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 14:08:52 +0200 Subject: [PATCH 222/598] Selector time_range API is unstable --- zenoh/src/selector.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 51ee72f98a..1598975e1c 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -96,11 +96,13 @@ impl<'a> Selector<'a> { (self.key_expr, self.parameters) } + 
#[zenoh_macros::unstable] /// Sets the time range targeted by the selector. pub fn set_time_range>>(&mut self, time_range: T) { self.parameters_mut().set_time_range(time_range); } + #[zenoh_macros::unstable] /// Sets the `parameters` part of this `Selector`. #[inline(always)] pub fn with_parameters(mut self, parameters: &'a str) -> Self { @@ -108,6 +110,7 @@ impl<'a> Selector<'a> { self } + #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. From f1eb8a5480b5bc0cfeb8224a75e65e779c234df1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 14:10:34 +0200 Subject: [PATCH 223/598] Selector time_range API is unstable --- zenoh/src/selector.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 1598975e1c..1fc91fbc6a 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -190,6 +190,7 @@ impl From> for HashMap { } impl Parameters<'_> { + #[zenoh_macros::unstable] /// Sets the time range targeted by the selector. pub fn set_time_range>>(&mut self, time_range: T) { let mut time_range: Option = time_range.into(); @@ -199,6 +200,7 @@ impl Parameters<'_> { }; } + #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
From 2992ea13e28b1c97c53bc90b1799c0f48720d020 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 14:55:06 +0200 Subject: [PATCH 224/598] Improve Properties docs --- commons/zenoh-collections/src/properties.rs | 40 +++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 800d2ed9dc..6ce36b9c53 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -20,19 +20,50 @@ use std::collections::HashMap; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. +/// +/// Example: +/// ``` +/// use zenoh_collections::Properties; +/// +/// let a = "a=1;b=2;c=3|4|5;d=6"; +/// let p = Properties::from(a); +/// +/// // Retrieve values +/// assert!(!p.is_empty()); +/// assert_eq!(p.get("a").unwrap(), "1"); +/// assert_eq!(p.get("b").unwrap(), "2"); +/// assert_eq!(p.get("c").unwrap(), "3|4|5"); +/// assert_eq!(p.get("d").unwrap(), "6"); +/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); +/// +/// // Iterate over properties +/// let mut iter = p.iter(); +/// assert_eq!(iter.next().unwrap(), ("a", "1")); +/// assert_eq!(iter.next().unwrap(), ("b", "2")); +/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); +/// assert_eq!(iter.next().unwrap(), ("d", "6")); +/// assert!(iter.next().is_none()); +/// +/// // Create properties from iterators +/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); +/// assert_eq!(p, pi); +/// ``` #[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] pub struct Properties<'s>(Cow<'s, str>); impl Properties<'_> { + /// Returns `true` if properties does not contain anything. 
pub fn is_empty(&self) -> bool { self.0.is_empty() } + /// Returns properties as [`str`]. pub fn as_str(&self) -> &str { &self.0 } + /// Returns `true` if properties contains the specified key. pub fn contains_key(&self, k: K) -> bool where K: Borrow, @@ -40,6 +71,7 @@ impl Properties<'_> { self.get(k).is_some() } + /// Returns a reference to the value corresponding to the key. pub fn get(&self, k: K) -> Option<&str> where K: Borrow, @@ -47,6 +79,7 @@ impl Properties<'_> { Parameters::get(self.as_str(), k.borrow()) } + /// Returns an iterator to the values corresponding to the key. pub fn values(&self, k: K) -> impl DoubleEndedIterator where K: Borrow, @@ -54,10 +87,14 @@ impl Properties<'_> { Parameters::values(self.as_str(), k.borrow()) } + /// Returns an iterator on the key-value pairs as `(&str, &str)`. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.as_str()) } + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. pub fn insert(&mut self, k: K, v: V) -> Option where K: Borrow, @@ -69,6 +106,7 @@ impl Properties<'_> { removed } + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. pub fn remove(&mut self, k: K) -> Option where K: Borrow, @@ -79,6 +117,7 @@ impl Properties<'_> { removed } + /// Join an iterator of key-value pairs `(&str, &str)` into properties. pub fn join<'s, I, K, V>(&mut self, iter: I) where I: Iterator + Clone, @@ -91,6 +130,7 @@ impl Properties<'_> { )); } + /// Convert these properties into owned properties. 
pub fn into_owned(self) -> Properties<'static> { Properties(Cow::Owned(self.0.into_owned())) } From c87a17285f69f067d2b622facb0e784f13455897 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 16:01:38 +0200 Subject: [PATCH 225/598] Move parameters and properties in zenoh_protocol::core --- Cargo.lock | 2 +- commons/zenoh-collections/src/lib.rs | 8 -------- commons/zenoh-protocol/src/core/endpoint.rs | 6 ++++-- commons/zenoh-protocol/src/core/mod.rs | 6 ++++++ .../src => zenoh-protocol/src/core}/parameters.rs | 0 .../src => zenoh-protocol/src/core}/properties.rs | 2 +- commons/zenoh-runtime/Cargo.toml | 2 +- commons/zenoh-runtime/src/lib.rs | 2 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 3 +-- io/zenoh-links/zenoh-link-tls/src/lib.rs | 3 +-- io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs | 3 +-- io/zenoh-transport/src/multicast/manager.rs | 7 ++++--- io/zenoh-transport/src/unicast/manager.rs | 3 +-- zenoh/src/payload.rs | 4 ++-- zenoh/src/selector.rs | 6 ++++-- 15 files changed, 28 insertions(+), 29 deletions(-) rename commons/{zenoh-collections/src => zenoh-protocol/src/core}/parameters.rs (100%) rename commons/{zenoh-collections/src => zenoh-protocol/src/core}/properties.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 07e166c57c..445fe76075 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5030,7 +5030,7 @@ dependencies = [ "futures", "lazy_static", "tokio", - "zenoh-collections", + "zenoh-protocol", "zenoh-result", ] diff --git a/commons/zenoh-collections/src/lib.rs b/commons/zenoh-collections/src/lib.rs index 6690d372da..6549594de2 100644 --- a/commons/zenoh-collections/src/lib.rs +++ b/commons/zenoh-collections/src/lib.rs @@ -20,9 +20,6 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; -pub mod parameters; -pub use parameters::*; - pub mod single_or_vec; pub use single_or_vec::*; @@ -35,8 +32,3 @@ pub use ring_buffer::*; pub mod stack_buffer; #[cfg(feature = "std")] pub use stack_buffer::*; - -#[cfg(feature = "std")] -pub 
mod properties; -#[cfg(feature = "std")] -pub use properties::*; diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index debe7da7b5..3397147369 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::locator::*; +use super::{ + locator::*, + parameters::{Parameters, SortedParameters}, +}; use alloc::{borrow::ToOwned, format, string::String}; use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; -use zenoh_collections::{Parameters, SortedParameters}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; // Parsing chars diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 20fcf85dd9..0920d55d01 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -53,6 +53,12 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; +pub mod parameters; +pub use parameters::*; + +pub mod properties; +pub use properties::*; + /// The global unique id of a zenoh peer. 
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs similarity index 100% rename from commons/zenoh-collections/src/parameters.rs rename to commons/zenoh-protocol/src/core/parameters.rs diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-protocol/src/core/properties.rs similarity index 99% rename from commons/zenoh-collections/src/properties.rs rename to commons/zenoh-protocol/src/core/properties.rs index 6ce36b9c53..d2f0506d46 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +use super::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index e5bd64b8c5..350bed9c26 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -16,5 +16,5 @@ description = { workspace = true } futures = { workspace = true } lazy_static = { workspace = true } zenoh-result = { workspace = true, features = ["std"] } -zenoh-collections = { workspace = true, features = ["std"] } +zenoh-protocol = { workspace = true } tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index 492e0a6665..0baefd73ef 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -25,7 +25,7 @@ use std::{ time::Duration, }; use tokio::runtime::{Handle, Runtime, RuntimeFlavor}; -use zenoh_collections::Properties; +use zenoh_protocol::core::Properties; use zenoh_result::ZResult as Result; const 
ZENOH_RUNTIME_THREADS_ENV: &str = "ZENOH_RUNTIME_THREADS"; diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 7f5e2a1587..0c4ae4937b 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -25,12 +25,11 @@ use config::{ }; use secrecy::ExposeSecret; use std::net::SocketAddr; -use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{endpoint::Address, Locator}, + core::{endpoint::Address, Locator, Parameters}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index dae8227cad..f6a7968326 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -27,12 +27,11 @@ use config::{ use rustls_pki_types::ServerName; use secrecy::ExposeSecret; use std::{convert::TryFrom, net::SocketAddr}; -use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{endpoint::Address, Locator}, + core::{endpoint::Address, Locator, Parameters}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index 70d3d4dddc..61c891da33 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -21,11 +21,10 @@ pub mod unicast; use async_trait::async_trait; pub use unicast::*; -use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::Locator; +use 
zenoh_protocol::core::{Locator, Parameters}; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index a6f682edc9..421664e954 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -19,7 +19,6 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; -use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] @@ -27,8 +26,10 @@ use zenoh_config::SharedMemoryConf; use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; -use zenoh_protocol::core::ZenohId; -use zenoh_protocol::transport::close; +use zenoh_protocol::{ + core::{Parameters, ZenohId}, + transport::close, +}; use zenoh_result::{bail, zerror, ZResult}; pub struct TransportManagerConfigMulticast { diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 0fdce265f9..ab31376788 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -36,7 +36,6 @@ use std::{ time::Duration, }; use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; -use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] @@ -46,7 +45,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::ZenohId, + core::{Parameters, ZenohId}, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 3f9fed1e90..5280c7af3c 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -28,7 +28,7 @@ use zenoh_buffers::{ ZBufReader, ZSlice, }; use 
zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_collections::Properties; +use zenoh_protocol::core::Properties; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -1371,7 +1371,7 @@ mod tests { use rand::Rng; use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; - use zenoh_collections::Properties; + use zenoh_protocol::core::Properties; const NUM: usize = 1_000; diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 1fc91fbc6a..893bfdb8a3 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -21,8 +21,10 @@ use std::{ ops::{Deref, DerefMut}, str::FromStr, }; -use zenoh_collections::Properties; -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::core::{ + key_expr::{keyexpr, OwnedKeyExpr}, + Properties, +}; use zenoh_result::ZResult; use zenoh_util::time_range::TimeRange; From 786d4199dde1eb77ea5863c6e5605b26f7e06149 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 15 Apr 2024 16:31:30 +0200 Subject: [PATCH 226/598] Interest network message (#915) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Add new Interest network message * Update doc * Update codec * Minor rework on interest message * Fix range in declare rand * Fix codec tests * Merge protocol_changes --------- Co-authored-by: Luca Cominardi --- commons/zenoh-codec/src/network/declare.rs | 129 +----- commons/zenoh-codec/src/network/interest.rs | 186 +++++++++ commons/zenoh-codec/src/network/mod.rs | 3 + commons/zenoh-protocol/src/network/declare.rs | 379 +---------------- .../zenoh-protocol/src/network/interest.rs | 383 ++++++++++++++++++ commons/zenoh-protocol/src/network/mod.rs | 13 +- 
io/zenoh-transport/src/shm.rs | 10 +- zenoh/src/key_expr.rs | 4 +- zenoh/src/net/primitives/demux.rs | 1 + zenoh/src/net/primitives/mod.rs | 8 +- zenoh/src/net/primitives/mux.rs | 55 ++- zenoh/src/net/routing/dispatcher/face.rs | 9 +- zenoh/src/net/routing/dispatcher/resource.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 10 +- zenoh/src/net/routing/hat/client/queries.rs | 8 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 14 +- .../net/routing/hat/linkstate_peer/queries.rs | 14 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 10 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 8 +- zenoh/src/net/routing/hat/router/pubsub.rs | 22 +- zenoh/src/net/routing/hat/router/queries.rs | 22 +- zenoh/src/net/routing/mod.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 13 +- zenoh/src/net/tests/tables.rs | 14 +- zenoh/src/session.rs | 20 +- 25 files changed, 771 insertions(+), 570 deletions(-) create mode 100644 commons/zenoh-codec/src/network/interest.rs create mode 100644 commons/zenoh-protocol/src/network/interest.rs diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 6e9dad12ce..ed3d019950 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -19,17 +19,10 @@ use zenoh_buffers::{ ZBuf, }; use zenoh_protocol::{ - common::{ - iext, - imsg::{self, HEADER_BITS}, - ZExtZ64, - }, + common::{iext, imsg, ZExtZ64}, core::{ExprId, ExprLen, WireExpr}, network::{ - declare::{ - self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, - DeclareMode, Interest, - }, + declare::{self, common, keyexpr, queryable, subscriber, token, Declare, DeclareBody}, id, Mapping, }, }; @@ -51,7 +44,6 @@ where DeclareBody::UndeclareQueryable(r) => self.write(&mut *writer, r)?, DeclareBody::DeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::UndeclareToken(r) => self.write(&mut *writer, r)?, - DeclareBody::DeclareInterest(r) => self.write(&mut 
*writer, r)?, DeclareBody::DeclareFinal(r) => self.write(&mut *writer, r)?, } @@ -79,7 +71,6 @@ where U_QUERYABLE => DeclareBody::UndeclareQueryable(codec.read(&mut *reader)?), D_TOKEN => DeclareBody::DeclareToken(codec.read(&mut *reader)?), U_TOKEN => DeclareBody::UndeclareToken(codec.read(&mut *reader)?), - D_INTEREST => DeclareBody::DeclareInterest(codec.read(&mut *reader)?), D_FINAL => DeclareBody::DeclareFinal(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -97,7 +88,7 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { - mode, + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -106,13 +97,9 @@ where // Header let mut header = id::DECLARE; - header |= match mode { - DeclareMode::Push => 0b00, - DeclareMode::Response(_) => 0b01, - DeclareMode::Request(_) => 0b10, - DeclareMode::RequestContinuous(_) => 0b11, - } << HEADER_BITS; - + if x.interest_id.is_some() { + header |= declare::flag::I; + } let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); @@ -121,12 +108,8 @@ where } self.write(&mut *writer, header)?; - // Body - if let DeclareMode::Request(rid) - | DeclareMode::RequestContinuous(rid) - | DeclareMode::Response(rid) = mode - { - self.write(&mut *writer, rid)?; + if let Some(interest_id) = interest_id { + self.write(&mut *writer, interest_id)?; } // Extensions @@ -175,14 +158,10 @@ where return Err(DidntRead); } - // Body - let mode = match (self.header >> HEADER_BITS) & 0b11 { - 0b00 => DeclareMode::Push, - 0b01 => DeclareMode::Response(self.codec.read(&mut *reader)?), - 0b10 => DeclareMode::Request(self.codec.read(&mut *reader)?), - 0b11 => DeclareMode::RequestContinuous(self.codec.read(&mut *reader)?), - _ => return Err(DidntRead), - }; + let mut interest_id = None; + if imsg::has_flag(self.header, declare::flag::I) { + interest_id = Some(self.codec.read(&mut *reader)?); + } // Extensions let mut 
ext_qos = declare::ext::QoSType::DEFAULT; @@ -219,7 +198,7 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - mode, + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -938,7 +917,7 @@ where // Extensions let mut ext_wire_expr = common::ext::WireExprType::null(); - let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); + let mut has_ext = imsg::has_flag(self.header, token::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); @@ -958,86 +937,6 @@ where } } -// DeclareInterest -impl WCodec<&interest::DeclareInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { - let interest::DeclareInterest { - interest: _, - wire_expr, - } = x; - - // Header - let header = declare::id::D_INTEREST; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, x.options())?; - if let Some(we) = wire_expr.as_ref() { - self.write(&mut *writer, we)?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::D_INTEREST { - return Err(DidntRead); - } - - // Body - let options: u8 = self.codec.read(&mut *reader)?; - let interest = Interest::from(options); - - let mut wire_expr = None; - if interest.restricted() { - let ccond = Zenoh080Condition::new(interest.named()); - let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; - we.mapping = if interest.mapping() { - Mapping::Sender - } else { - Mapping::Receiver - }; - wire_expr = Some(we); - } - - // Extensions - let has_ext = 
imsg::has_flag(self.header, token::flag::Z); - if has_ext { - extension::skip_all(reader, "DeclareInterest")?; - } - - Ok(interest::DeclareInterest { - interest, - wire_expr, - }) - } -} - // WARNING: this is a temporary extension used for undeclarations impl WCodec<(&common::ext::WireExprType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs new file mode 100644 index 0000000000..9d1e64de76 --- /dev/null +++ b/commons/zenoh-codec/src/network/interest.rs @@ -0,0 +1,186 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_protocol::{ + common::{ + iext, + imsg::{self, HEADER_BITS}, + }, + core::WireExpr, + network::{ + declare, id, + interest::{self, Interest, InterestMode, InterestOptions}, + Mapping, + }, +}; + +// Interest +impl WCodec<&Interest, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Interest) -> Self::Output { + let Interest { + id, + mode, + options: _, // Compute the options on-the-fly according to Interest fields + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + } = x; + + // Header + let mut header = id::INTEREST; + header |= match mode { + InterestMode::Final => 0b00, + InterestMode::Current => 0b01, + InterestMode::Future => 0b10, + InterestMode::CurrentFuture => 0b11, + } << HEADER_BITS; + let mut 
n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + + (ext_tstamp.is_some() as u8) + + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); + if n_exts != 0 { + header |= declare::flag::Z; + } + self.write(&mut *writer, header)?; + + self.write(&mut *writer, id)?; + + if *mode != InterestMode::Final { + self.write(&mut *writer, x.options())?; + if let Some(we) = wire_expr.as_ref() { + self.write(&mut *writer, we)?; + } + } + + // Extensions + if ext_qos != &declare::ext::QoSType::DEFAULT { + n_exts -= 1; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; + } + if let Some(ts) = ext_tstamp.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (ts, n_exts != 0))?; + } + if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { + n_exts -= 1; + self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; + } + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != id::INTEREST { + return Err(DidntRead); + } + + let id = self.codec.read(&mut *reader)?; + let mode = match (self.header >> HEADER_BITS) & 0b11 { + 0b00 => InterestMode::Final, + 0b01 => InterestMode::Current, + 0b10 => InterestMode::Future, + 0b11 => InterestMode::CurrentFuture, + _ => return Err(DidntRead), + }; + + let mut options = InterestOptions::empty(); + let mut wire_expr = None; + if mode != InterestMode::Final { + let options_byte: u8 = self.codec.read(&mut *reader)?; + options = InterestOptions::from(options_byte); + if options.restricted() { + let ccond = Zenoh080Condition::new(options.named()); + let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; + we.mapping = if options.mapping() { + Mapping::Sender + } else { + 
Mapping::Receiver + }; + wire_expr = Some(we); + } + } + + // Extensions + let mut ext_qos = declare::ext::QoSType::DEFAULT; + let mut ext_tstamp = None; + let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; + + let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); + while has_ext { + let ext: u8 = self.codec.read(&mut *reader)?; + let eodec = Zenoh080Header::new(ext); + match iext::eid(ext) { + declare::ext::QoS::ID => { + let (q, ext): (interest::ext::QoSType, bool) = eodec.read(&mut *reader)?; + ext_qos = q; + has_ext = ext; + } + declare::ext::Timestamp::ID => { + let (t, ext): (interest::ext::TimestampType, bool) = + eodec.read(&mut *reader)?; + ext_tstamp = Some(t); + has_ext = ext; + } + declare::ext::NodeId::ID => { + let (nid, ext): (interest::ext::NodeIdType, bool) = eodec.read(&mut *reader)?; + ext_nodeid = nid; + has_ext = ext; + } + _ => { + has_ext = extension::skip(reader, "Declare", ext)?; + } + } + } + + Ok(Interest { + id, + mode, + options, + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + }) + } +} diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index 3a227cd42a..5ebdb17b8e 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // mod declare; +mod interest; mod oam; mod push; mod request; @@ -45,6 +46,7 @@ where NetworkBody::Request(b) => self.write(&mut *writer, b), NetworkBody::Response(b) => self.write(&mut *writer, b), NetworkBody::ResponseFinal(b) => self.write(&mut *writer, b), + NetworkBody::Interest(b) => self.write(&mut *writer, b), NetworkBody::Declare(b) => self.write(&mut *writer, b), NetworkBody::OAM(b) => self.write(&mut *writer, b), } @@ -89,6 +91,7 @@ where id::REQUEST => NetworkBody::Request(self.read(&mut *reader)?), id::RESPONSE => NetworkBody::Response(self.read(&mut *reader)?), id::RESPONSE_FINAL => NetworkBody::ResponseFinal(self.read(&mut *reader)?), + id::INTEREST => 
NetworkBody::Interest(self.read(&mut *reader)?), id::DECLARE => NetworkBody::Declare(self.read(&mut *reader)?), id::OAM => NetworkBody::OAM(self.read(&mut *reader)?), _ => return Err(DidntRead), diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 31e8adcc6e..9a41f42e56 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -19,8 +19,6 @@ use crate::{ }; use alloc::borrow::Cow; pub use common::*; -use core::sync::atomic::AtomicU32; -pub use interest::*; pub use keyexpr::*; pub use queryable::*; pub use subscriber::*; @@ -33,59 +31,24 @@ pub mod flag { } /// Flags: -/// - |: Mode The mode of the the declaration* -/// -/ +/// - I: Interest If I==1 then interest_id is present +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|Mod| DECLARE | +/// |Z|X|I| DECLARE | /// +-+-+-+---------+ -/// ~ rid:z32 ~ if Mode != Push +/// ~interest_id:z32~ if I==1 /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ /// +---------------+ /// -/// *Mode of declaration: -/// - Mode 0b00: Push -/// - Mode 0b01: Response -/// - Mode 0b10: Request -/// - Mode 0b11: RequestContinuous - -/// The resolution of a RequestId -pub type DeclareRequestId = u32; -pub type AtomicDeclareRequestId = AtomicU32; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum DeclareMode { - Push, - Request(DeclareRequestId), - RequestContinuous(DeclareRequestId), - Response(DeclareRequestId), -} - -impl DeclareMode { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - - let mut rng = rand::thread_rng(); - - match rng.gen_range(0..4) { - 0 => DeclareMode::Push, - 1 => DeclareMode::Request(rng.gen()), - 2 => DeclareMode::RequestContinuous(rng.gen()), - 3 => DeclareMode::Response(rng.gen()), - _ => unreachable!(), - } - } -} - #[derive(Debug, Clone, 
PartialEq, Eq)] pub struct Declare { - pub mode: DeclareMode, + pub interest_id: Option, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -121,8 +84,6 @@ pub mod id { pub const D_TOKEN: u8 = 0x06; pub const U_TOKEN: u8 = 0x07; - pub const D_INTEREST: u8 = 0x08; - pub const D_FINAL: u8 = 0x1A; } @@ -136,7 +97,6 @@ pub enum DeclareBody { UndeclareQueryable(UndeclareQueryable), DeclareToken(DeclareToken), UndeclareToken(UndeclareToken), - DeclareInterest(DeclareInterest), DeclareFinal(DeclareFinal), } @@ -147,7 +107,7 @@ impl DeclareBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..10) { + match rng.gen_range(0..9) { 0 => DeclareBody::DeclareKeyExpr(DeclareKeyExpr::rand()), 1 => DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr::rand()), 2 => DeclareBody::DeclareSubscriber(DeclareSubscriber::rand()), @@ -156,8 +116,7 @@ impl DeclareBody { 5 => DeclareBody::UndeclareQueryable(UndeclareQueryable::rand()), 6 => DeclareBody::DeclareToken(DeclareToken::rand()), 7 => DeclareBody::UndeclareToken(UndeclareToken::rand()), - 8 => DeclareBody::DeclareInterest(DeclareInterest::rand()), - 9 => DeclareBody::DeclareFinal(DeclareFinal::rand()), + 8 => DeclareBody::DeclareFinal(DeclareFinal::rand()), _ => unreachable!(), } } @@ -170,14 +129,16 @@ impl Declare { let mut rng = rand::thread_rng(); - let mode = DeclareMode::rand(); + let interest_id = rng + .gen_bool(0.5) + .then_some(rng.gen::()); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); let body = DeclareBody::rand(); Self { - mode, + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -197,7 +158,7 @@ pub mod common { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|x|x| D_FINAL | + /// |Z|X|X| D_FINAL | /// +---------------+ /// ~ [final_exts] ~ if Z==1 /// +---------------+ @@ -714,319 +675,3 @@ pub mod token { } } } - -pub mod interest { - use core::{ - fmt::{self, Debug}, - 
ops::{Add, AddAssign, Sub, SubAssign}, - }; - - use super::*; - - pub type InterestId = u32; - - pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow - } - - /// # DeclareInterest message - /// - /// The DECLARE INTEREST message is sent to request the transmission of current and optionally future - /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be - /// sent to request the transmisison of all current subscriptions matching `a/*`. - /// - /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: - /// - Push: invalid - /// - Request: only current declarations - /// - RequestContinous: current and future declarations - /// - Response: invalid - /// - /// E.g., the [`DeclareInterest`] message flow is the following for a Request: - /// - /// ```text - /// A B - /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::Request. - /// | | This is a DeclareInterest e.g. for subscriber declarations. - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | | - /// | FINAL | - /// |<------------------| -- Sent in Declare::Response - /// ``` - /// - /// - /// And the [`DeclareInterest`] message flow is the following for a RequestContinuous: - /// - /// ```text - /// A B - /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. 
- /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push - /// | | - /// | FINAL | - /// |<------------------| -- Sent in Declare::Response - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. - /// | UNDECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push. This is a new subscriber undeclaration. - /// | | - /// | ... | - /// | | - /// | FINAL | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This stops the transmission of subscriber declarations/undeclarations. - /// | | - /// ``` - /// - /// The DECLARE INTEREST message structure is defined as follows: - /// - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| D_INT | - /// +---------------+ - /// |A|M|N|R|T|Q|S|K| (*) - /// +---------------+ - /// ~ key_scope:z16 ~ if R==1 - /// +---------------+ - /// ~ key_suffix ~ if R==1 && N==1 -- - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// - /// (*) - if K==1 then the interest refers to key expressions - /// - if S==1 then the interest refers to subscribers - /// - if Q==1 then the interest refers to queryables - /// - if T==1 then the interest refers to tokens - /// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. - /// - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0. - /// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. - /// If R==0 then M should be set to 0. 
- /// - if A==1 then the replies SHOULD be aggregated - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct DeclareInterest { - pub interest: Interest, - pub wire_expr: Option>, - } - - impl DeclareInterest { - pub fn options(&self) -> u8 { - let mut interest = self.interest; - if let Some(we) = self.wire_expr.as_ref() { - interest += Interest::RESTRICTED; - if we.has_suffix() { - interest += Interest::NAMED; - } - if let Mapping::Sender = we.mapping { - interest += Interest::MAPPING; - } - } - interest.options - } - - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); - let interest = Interest::rand(); - - Self { - wire_expr, - interest, - } - } - } - - #[derive(Clone, Copy)] - pub struct Interest { - options: u8, - } - - impl Interest { - // Flags - pub const KEYEXPRS: Interest = Interest::options(1); - pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); - pub const QUERYABLES: Interest = Interest::options(1 << 2); - pub const TOKENS: Interest = Interest::options(1 << 3); - const RESTRICTED: Interest = Interest::options(1 << 4); - const NAMED: Interest = Interest::options(1 << 5); - const MAPPING: Interest = Interest::options(1 << 6); - pub const AGGREGATE: Interest = Interest::options(1 << 7); - pub const ALL: Interest = Interest::options( - Interest::KEYEXPRS.options - | Interest::SUBSCRIBERS.options - | Interest::QUERYABLES.options - | Interest::TOKENS.options, - ); - - const fn options(options: u8) -> Self { - Self { options } - } - - pub const fn empty() -> Self { - Self { options: 0 } - } - - pub const fn keyexprs(&self) -> bool { - imsg::has_flag(self.options, Self::KEYEXPRS.options) - } - - pub const fn subscribers(&self) -> bool { - imsg::has_flag(self.options, Self::SUBSCRIBERS.options) - } - - pub const fn queryables(&self) -> bool { - imsg::has_flag(self.options, Self::QUERYABLES.options) - } - - pub const fn 
tokens(&self) -> bool { - imsg::has_flag(self.options, Self::TOKENS.options) - } - - pub const fn restricted(&self) -> bool { - imsg::has_flag(self.options, Self::RESTRICTED.options) - } - - pub const fn named(&self) -> bool { - imsg::has_flag(self.options, Self::NAMED.options) - } - - pub const fn mapping(&self) -> bool { - imsg::has_flag(self.options, Self::MAPPING.options) - } - - pub const fn aggregate(&self) -> bool { - imsg::has_flag(self.options, Self::AGGREGATE.options) - } - - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let mut s = Self::empty(); - if rng.gen_bool(0.5) { - s += Interest::KEYEXPRS; - } - if rng.gen_bool(0.5) { - s += Interest::SUBSCRIBERS; - } - if rng.gen_bool(0.5) { - s += Interest::TOKENS; - } - if rng.gen_bool(0.5) { - s += Interest::AGGREGATE; - } - s - } - } - - impl PartialEq for Interest { - fn eq(&self, other: &Self) -> bool { - self.keyexprs() == other.keyexprs() - && self.subscribers() == other.subscribers() - && self.queryables() == other.queryables() - && self.tokens() == other.tokens() - && self.aggregate() == other.aggregate() - } - } - - impl Debug for Interest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Interest {{ ")?; - if self.keyexprs() { - write!(f, "K:Y, ")?; - } else { - write!(f, "K:N, ")?; - } - if self.subscribers() { - write!(f, "S:Y, ")?; - } else { - write!(f, "S:N, ")?; - } - if self.queryables() { - write!(f, "Q:Y, ")?; - } else { - write!(f, "Q:N, ")?; - } - if self.tokens() { - write!(f, "T:Y, ")?; - } else { - write!(f, "T:N, ")?; - } - if self.aggregate() { - write!(f, "A:Y")?; - } else { - write!(f, "A:N")?; - } - write!(f, " }}")?; - Ok(()) - } - } - - impl Eq for Interest {} - - impl Add for Interest { - type Output = Self; - - #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest - fn add(self, rhs: Self) -> Self::Output { - Self { - options: self.options | rhs.options, 
- } - } - } - - impl AddAssign for Interest { - #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest - fn add_assign(&mut self, rhs: Self) { - self.options |= rhs.options; - } - } - - impl Sub for Interest { - type Output = Self; - - fn sub(self, rhs: Self) -> Self::Output { - Self { - options: self.options & !rhs.options, - } - } - } - - impl SubAssign for Interest { - fn sub_assign(&mut self, rhs: Self) { - self.options &= !rhs.options; - } - } - - impl From for Interest { - fn from(options: u8) -> Self { - Self { options } - } - } -} diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs new file mode 100644 index 0000000000..e7eb75787e --- /dev/null +++ b/commons/zenoh-protocol/src/network/interest.rs @@ -0,0 +1,383 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{common::imsg, core::WireExpr, network::Mapping}; +use core::{ + fmt::{self, Debug}, + ops::{Add, AddAssign, Sub, SubAssign}, + sync::atomic::AtomicU32, +}; + +pub type InterestId = u32; + +pub mod flag { + pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow +} + +/// The INTEREST message is sent to request the transmission of current and optionally future +/// declarations of a given kind matching a target keyexpr. E.g., an interest could be +/// sent to request the transmisison of all current subscriptions matching `a/*`. +/// +/// The behaviour of a INTEREST depends on the INTEREST MODE. 
+/// +/// E.g., the message flow is the following for an [`Interest`] with mode `Current`: +/// +/// ```text +/// A B +/// | INTEREST | +/// |------------------>| -- Mode: Current +/// | | This is an Interest e.g. for subscriber declarations. +/// | | +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field set +/// | | +/// | DECL FINAL | +/// |<------------------| -- With interest_id field set +/// | | +/// ``` +/// +/// And the message flow is the following for an [`Interest`] with mode `CurrentFuture`: +/// +/// ```text +/// A B +/// | INTEREST | +/// |------------------>| -- This is a DeclareInterest e.g. for subscriber declarations/undeclarations. +/// | | +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | | +/// | DECL FINAL | +/// |<------------------| -- With interest_id field set +/// | | +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | UNDECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | | +/// | ... | +/// | | +/// | INTEREST FINAL | +/// |------------------>| -- Mode: Final +/// | | This stops the transmission of subscriber declarations/undeclarations. 
+/// | | +/// +/// Flags: +/// - |: Mode The mode of the interest* +/// -/ +/// - Z: Extension If Z==1 then at least one extension is present +/// +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// |Z|Mod|INTEREST | +/// +-+-+-+---------+ +/// ~ id:z32 ~ +/// +---------------+ +/// |A|M|N|R|T|Q|S|K| if Mod!=Final (*) +/// +---------------+ +/// ~ key_scope:z16 ~ if Mod!=Final && R==1 +/// +---------------+ +/// ~ key_suffix ~ if Mod!=Final && R==1 && N==1 -- +/// +---------------+ +/// ~ [int_exts] ~ if Z==1 +/// +---------------+ +/// +/// *Mode of declaration: +/// - Mode 0b00: Final +/// - Mode 0b01: Current +/// - Mode 0b10: Future +/// - Mode 0b11: CurrentFuture +/// +/// (*) - if K==1 then the interest refers to key expressions +/// - if S==1 then the interest refers to subscribers +/// - if Q==1 then the interest refers to queryables +/// - if T==1 then the interest refers to tokens +/// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. +/// - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0. +/// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. +/// If R==0 then M should be set to 0. 
+/// - if A==1 then the replies SHOULD be aggregated +/// ``` + +/// The resolution of a RequestId +pub type DeclareRequestId = u32; +pub type AtomicDeclareRequestId = AtomicU32; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum InterestMode { + Final, + Current, + Future, + CurrentFuture, +} + +impl InterestMode { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + + let mut rng = rand::thread_rng(); + + match rng.gen_range(0..4) { + 0 => InterestMode::Final, + 1 => InterestMode::Current, + 2 => InterestMode::Future, + 3 => InterestMode::CurrentFuture, + _ => unreachable!(), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Interest { + pub id: InterestId, + pub mode: InterestMode, + pub options: InterestOptions, + pub wire_expr: Option>, + pub ext_qos: ext::QoSType, + pub ext_tstamp: Option, + pub ext_nodeid: ext::NodeIdType, +} + +pub mod ext { + use crate::{ + common::{ZExtZ64, ZExtZBuf}, + zextz64, zextzbuf, + }; + + pub type QoS = zextz64!(0x1, false); + pub type QoSType = crate::network::ext::QoSType<{ QoS::ID }>; + + pub type Timestamp = zextzbuf!(0x2, false); + pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>; + + pub type NodeId = zextz64!(0x3, true); + pub type NodeIdType = crate::network::ext::NodeIdType<{ NodeId::ID }>; +} + +impl Interest { + pub fn options(&self) -> u8 { + let mut interest = self.options; + if let Some(we) = self.wire_expr.as_ref() { + interest += InterestOptions::RESTRICTED; + if we.has_suffix() { + interest += InterestOptions::NAMED; + } + if let Mapping::Sender = we.mapping { + interest += InterestOptions::MAPPING; + } + } + interest.options + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let id = rng.gen::(); + let mode = InterestMode::rand(); + let options = InterestOptions::rand(); + let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); + let ext_qos = ext::QoSType::rand(); + let 
ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); + let ext_nodeid = ext::NodeIdType::rand(); + + Self { + id, + mode, + wire_expr, + options, + ext_qos, + ext_tstamp, + ext_nodeid, + } + } +} + +#[repr(transparent)] +#[derive(Clone, Copy)] +pub struct InterestOptions { + options: u8, +} + +impl InterestOptions { + // Flags + pub const KEYEXPRS: InterestOptions = InterestOptions::options(1); + pub const SUBSCRIBERS: InterestOptions = InterestOptions::options(1 << 1); + pub const QUERYABLES: InterestOptions = InterestOptions::options(1 << 2); + pub const TOKENS: InterestOptions = InterestOptions::options(1 << 3); + const RESTRICTED: InterestOptions = InterestOptions::options(1 << 4); + const NAMED: InterestOptions = InterestOptions::options(1 << 5); + const MAPPING: InterestOptions = InterestOptions::options(1 << 6); + pub const AGGREGATE: InterestOptions = InterestOptions::options(1 << 7); + pub const ALL: InterestOptions = InterestOptions::options( + InterestOptions::KEYEXPRS.options + | InterestOptions::SUBSCRIBERS.options + | InterestOptions::QUERYABLES.options + | InterestOptions::TOKENS.options, + ); + + const fn options(options: u8) -> Self { + Self { options } + } + + pub const fn empty() -> Self { + Self { options: 0 } + } + + pub const fn keyexprs(&self) -> bool { + imsg::has_flag(self.options, Self::KEYEXPRS.options) + } + + pub const fn subscribers(&self) -> bool { + imsg::has_flag(self.options, Self::SUBSCRIBERS.options) + } + + pub const fn queryables(&self) -> bool { + imsg::has_flag(self.options, Self::QUERYABLES.options) + } + + pub const fn tokens(&self) -> bool { + imsg::has_flag(self.options, Self::TOKENS.options) + } + + pub const fn restricted(&self) -> bool { + imsg::has_flag(self.options, Self::RESTRICTED.options) + } + + pub const fn named(&self) -> bool { + imsg::has_flag(self.options, Self::NAMED.options) + } + + pub const fn mapping(&self) -> bool { + imsg::has_flag(self.options, Self::MAPPING.options) + } + + pub const fn 
aggregate(&self) -> bool { + imsg::has_flag(self.options, Self::AGGREGATE.options) + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let mut s = Self::empty(); + if rng.gen_bool(0.5) { + s += InterestOptions::KEYEXPRS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::SUBSCRIBERS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::TOKENS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::AGGREGATE; + } + s + } +} + +impl PartialEq for InterestOptions { + fn eq(&self, other: &Self) -> bool { + self.keyexprs() == other.keyexprs() + && self.subscribers() == other.subscribers() + && self.queryables() == other.queryables() + && self.tokens() == other.tokens() + && self.aggregate() == other.aggregate() + } +} + +impl Debug for InterestOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Interest {{ ")?; + if self.keyexprs() { + write!(f, "K:Y, ")?; + } else { + write!(f, "K:N, ")?; + } + if self.subscribers() { + write!(f, "S:Y, ")?; + } else { + write!(f, "S:N, ")?; + } + if self.queryables() { + write!(f, "Q:Y, ")?; + } else { + write!(f, "Q:N, ")?; + } + if self.tokens() { + write!(f, "T:Y, ")?; + } else { + write!(f, "T:N, ")?; + } + if self.aggregate() { + write!(f, "A:Y")?; + } else { + write!(f, "A:N")?; + } + write!(f, " }}")?; + Ok(()) + } +} + +impl Eq for InterestOptions {} + +impl Add for InterestOptions { + type Output = Self; + + #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest + fn add(self, rhs: Self) -> Self::Output { + Self { + options: self.options | rhs.options, + } + } +} + +impl AddAssign for InterestOptions { + #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest + fn add_assign(&mut self, rhs: Self) { + self.options |= rhs.options; + } +} + +impl Sub for InterestOptions { + type Output = Self; + + fn sub(self, rhs: Self) -> Self::Output { + Self { + options: 
self.options & !rhs.options, + } + } +} + +impl SubAssign for InterestOptions { + fn sub_assign(&mut self, rhs: Self) { + self.options &= !rhs.options; + } +} + +impl From for InterestOptions { + fn from(options: u8) -> Self { + Self { options } + } +} diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index e60388f425..5a0635c9e0 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // pub mod declare; +pub mod interest; pub mod oam; pub mod push; pub mod request; @@ -20,10 +21,10 @@ pub mod response; use core::fmt; pub use declare::{ - Declare, DeclareBody, DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, - DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareKeyExpr, UndeclareQueryable, - UndeclareSubscriber, UndeclareToken, + Declare, DeclareBody, DeclareFinal, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, + DeclareToken, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber, UndeclareToken, }; +pub use interest::Interest; pub use oam::Oam; pub use push::Push; pub use request::{AtomicRequestId, Request, RequestId}; @@ -40,6 +41,7 @@ pub mod id { pub const REQUEST: u8 = 0x1c; pub const RESPONSE: u8 = 0x1b; pub const RESPONSE_FINAL: u8 = 0x1a; + pub const INTEREST: u8 = 0x19; } #[repr(u8)] @@ -73,6 +75,7 @@ pub enum NetworkBody { Request(Request), Response(Response), ResponseFinal(ResponseFinal), + Interest(Interest), Declare(Declare), OAM(Oam), } @@ -117,6 +120,7 @@ impl NetworkMessage { NetworkBody::Request(msg) => msg.ext_qos.is_express(), NetworkBody::Response(msg) => msg.ext_qos.is_express(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.is_express(), + NetworkBody::Interest(msg) => msg.ext_qos.is_express(), NetworkBody::Declare(msg) => msg.ext_qos.is_express(), NetworkBody::OAM(msg) => msg.ext_qos.is_express(), } @@ -133,6 +137,7 @@ impl NetworkMessage { NetworkBody::Request(msg) => 
msg.ext_qos.get_congestion_control(), NetworkBody::Response(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_congestion_control(), + NetworkBody::Interest(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::OAM(msg) => msg.ext_qos.get_congestion_control(), }; @@ -147,6 +152,7 @@ impl NetworkMessage { NetworkBody::Request(msg) => msg.ext_qos.get_priority(), NetworkBody::Response(msg) => msg.ext_qos.get_priority(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_priority(), + NetworkBody::Interest(msg) => msg.ext_qos.get_priority(), NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::OAM(msg) => msg.ext_qos.get_priority(), } @@ -162,6 +168,7 @@ impl fmt::Display for NetworkMessage { Request(_) => write!(f, "Request"), Response(_) => write!(f, "Response"), ResponseFinal(_) => write!(f, "ResponseFinal"), + Interest(_) => write!(f, "Interest"), Declare(_) => write!(f, "Declare"), } } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index bf569d0345..09edde884e 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -145,7 +145,10 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { ResponseBody::Reply(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), }, - NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(false), } } @@ -196,7 +199,10 @@ pub fn map_zmsg_to_shmbuf( ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), ResponseBody::Err(b) => b.map_to_shmbuf(shmr), }, - NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(false), } } diff 
--git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index 1e8da2c3c9..d2bfb5bcfe 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -53,7 +53,7 @@ pub use zenoh_keyexpr::*; pub use zenoh_macros::{kedefine, keformat, kewrite}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, - network::{declare, DeclareBody, DeclareMode, Mapping, UndeclareKeyExpr}, + network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -664,7 +664,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index d62e410c81..e58e01a1b5 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -67,6 +67,7 @@ impl TransportPeerEventHandler for DeMux { match msg.body { NetworkBody::Push(m) => self.face.send_push(m), NetworkBody::Declare(m) => self.face.send_declare(m), + NetworkBody::Interest(_) => todo!(), NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index fd85280be0..d3aa8097ca 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -18,11 +18,15 @@ use std::any::Any; pub use demux::*; pub use mux::*; -use zenoh_protocol::network::{Declare, Push, Request, Response, ResponseFinal}; +use zenoh_protocol::network::{ + interest::Interest, Declare, Push, Request, Response, ResponseFinal, +}; use super::routing::RoutingContext; pub trait Primitives: Send + Sync { + fn send_interest(&self, msg: 
Interest); + fn send_declare(&self, msg: Declare); fn send_push(&self, msg: Push); @@ -56,6 +60,8 @@ pub(crate) trait EPrimitives: Send + Sync { pub struct DummyPrimitives; impl Primitives for DummyPrimitives { + fn send_interest(&self, _msg: Interest) {} + fn send_declare(&self, _msg: Declare) {} fn send_push(&self, _msg: Push) {} diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 5c473e8ad8..ccb2452f30 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -19,7 +19,8 @@ use crate::net::routing::{ }; use std::sync::OnceLock; use zenoh_protocol::network::{ - Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, + interest::Interest, Declare, NetworkBody, NetworkMessage, Push, Request, Response, + ResponseFinal, }; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; @@ -40,6 +41,34 @@ impl Mux { } impl Primitives for Mux { + fn send_interest(&self, msg: Interest) { + let msg = NetworkMessage { + body: NetworkBody::Interest(msg), + #[cfg(feature = "stats")] + size: None, + }; + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let Some(face) = face.upgrade() else { + log::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); + return; + }; + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + log::debug!("Uninitialized multiplexer. 
Interest not sent: {:?}", msg); + } + } + fn send_declare(&self, msg: Declare) { let msg = NetworkMessage { body: NetworkBody::Declare(msg), @@ -316,6 +345,30 @@ impl McastMux { } impl Primitives for McastMux { + fn send_interest(&self, msg: Interest) { + let msg = NetworkMessage { + body: NetworkBody::Interest(msg), + #[cfg(feature = "stats")] + size: None, + }; + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + log::error!("Uninitialized multiplexer!"); + } + } + fn send_declare(&self, msg: Declare) { let msg = NetworkMessage { body: NetworkBody::Declare(msg), diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 371edee57b..29c3f0da2f 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -154,7 +154,7 @@ impl fmt::Display for FaceState { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct WeakFace { pub(crate) tables: Weak, pub(crate) state: Weak, @@ -185,6 +185,10 @@ impl Face { } impl Primitives for Face { + fn send_interest(&self, _msg: zenoh_protocol::network::Interest) { + todo!() + } + fn send_declare(&self, msg: zenoh_protocol::network::Declare) { let ctrl_lock = zlock!(self.tables.ctrl_lock); match msg.body { @@ -238,8 +242,7 @@ impl Primitives for Face { } zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(), - 
zenoh_protocol::network::DeclareBody::DeclareFinal(_m) => todo!(), + zenoh_protocol::network::DeclareBody::DeclareFinal(_) => todo!(), } drop(ctrl_lock); } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 58a081d743..62193cdf93 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -27,7 +27,7 @@ use zenoh_protocol::{ network::{ declare::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, - DeclareBody, DeclareKeyExpr, DeclareMode, + DeclareBody, DeclareKeyExpr, }, Mapping, }, @@ -465,7 +465,7 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 6c689d3336..e85bb77bf9 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: 
ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 28e1d75460..5c0bc5349b 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -418,7 +418,7 @@ pub(super) fn undeclare_client_subscription( if let 
Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -460,7 +460,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 356793e3a3..150c12a632 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -126,7 +126,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -170,7 +170,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -339,7 +339,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, 
ext_nodeid: ext::NodeIdType { @@ -365,7 +365,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 5ac0b22846..b495248788 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { 
face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index c2d62c7658..72c32b9217 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -412,7 +412,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -564,7 +564,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: 
DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -606,7 +606,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -635,7 +635,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -774,7 +774,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -800,7 +800,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index e647cf2dc7..99e787beb5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -194,7 
+194,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -248,7 +248,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -473,7 +473,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -499,7 +499,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -775,7 +775,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -874,7 +874,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -900,7 +900,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); 
dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 77f51c16b3..75b4d4ef6a 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -107,6 +107,7 @@ impl RoutingContext { NetworkBody::Request(m) => Some(&m.wire_expr), NetworkBody::Response(m) => Some(&m.wire_expr), NetworkBody::ResponseFinal(_) => None, + NetworkBody::Interest(m) => m.wire_expr.as_ref(), NetworkBody::Declare(m) => match &m.body { DeclareBody::DeclareKeyExpr(m) => Some(&m.wire_expr), DeclareBody::UndeclareKeyExpr(_) => None, @@ -116,7 +117,6 @@ impl RoutingContext { DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), - DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), DeclareBody::DeclareFinal(_) => None, }, NetworkBody::OAM(_) => None, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 5b5b41b390..78ece859c7 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -33,6 +33,7 @@ use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; use zenoh_protocol::network::declare::QueryableId; +use zenoh_protocol::network::Interest; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -40,8 +41,8 @@ use zenoh_protocol::{ }, network::{ declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareMode, DeclareQueryable, DeclareSubscriber, Push, Request, - Response, ResponseFinal, + ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, + 
ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; @@ -277,7 +278,7 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, @@ -290,7 +291,7 @@ impl AdminSpace { }); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -320,6 +321,10 @@ impl AdminSpace { } impl Primitives for AdminSpace { + fn send_interest(&self, msg: Interest) { + log::trace!("Recv interest {:?}", msg); + } + fn send_declare(&self, msg: Declare) { log::trace!("Recv declare {:?}", msg); if let DeclareBody::DeclareKeyExpr(m) = msg.body { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 35db2a7ac4..841bc209f6 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,7 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr, DeclareMode}; +use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use zenoh_protocol::zenoh::{PushBody, Put}; #[test] @@ -495,6 +495,8 @@ impl ClientPrimitives { } impl Primitives for ClientPrimitives { + fn send_interest(&self, _msg: zenoh_protocol::network::Interest) {} + fn send_declare(&self, msg: zenoh_protocol::network::Declare) { match msg.body { DeclareBody::DeclareKeyExpr(d) => { @@ -579,7 +581,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -607,7 +609,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - mode: 
DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -629,7 +631,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -657,7 +659,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -679,7 +681,7 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 3f1c382a66..29ad9c2b00 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -69,7 +69,7 @@ use zenoh_protocol::{ network::{ declare::{ self, common::ext::WireExprType, queryable::ext::QueryableInfoType, - subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, + subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, request::{self, ext::TargetType, Request}, @@ -893,7 +893,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1106,7 +1106,7 @@ impl Session { // }; primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1163,7 +1163,7 @@ impl Session { let primitives = 
state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1215,7 +1215,7 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1237,7 +1237,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1273,7 +1273,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1298,7 +1298,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -1984,6 +1984,9 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } impl Primitives for Session { + fn send_interest(&self, msg: zenoh_protocol::network::Interest) { + trace!("recv Interest {} {:?}", msg.id, msg.wire_expr); + } fn send_declare(&self, msg: zenoh_protocol::network::Declare) { match msg.body { zenoh_protocol::network::DeclareBody::DeclareKeyExpr(m) => { @@ -2086,7 +2089,6 @@ impl Primitives for Session { } DeclareBody::DeclareToken(_) => todo!(), DeclareBody::UndeclareToken(_) => todo!(), - DeclareBody::DeclareInterest(_) => todo!(), DeclareBody::DeclareFinal(_) => todo!(), } } From 
64227479e237a91e3d82a3fd21a5e8b2367aa89e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 16:42:27 +0200 Subject: [PATCH 227/598] Properties do not depend on Parameters --- commons/zenoh-protocol/src/core/properties.rs | 121 +++++++++++++----- zenoh/src/selector.rs | 2 +- 2 files changed, 92 insertions(+), 31 deletions(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index d2f0506d46..88097debdc 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,12 +11,25 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; +const LIST_SEPARATOR: char = ';'; +const FIELD_SEPARATOR: char = '='; +const VALUE_SEPARATOR: char = '|'; + +fn split_once(s: &str, c: char) -> (&str, &str) { + match s.find(c) { + Some(index) => { + let (l, r) = s.split_at(index); + (l, &r[1..]) + } + None => (s, ""), + } +} + /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. @@ -71,25 +84,37 @@ impl Properties<'_> { self.get(k).is_some() } - /// Returns a reference to the value corresponding to the key. + /// Returns a reference to the `&str`-value corresponding to the key. pub fn get(&self, k: K) -> Option<&str> where K: Borrow, { - Parameters::get(self.as_str(), k.borrow()) + self.iter() + .find(|(key, _)| *key == k.borrow()) + .map(|(_, value)| value) } - /// Returns an iterator to the values corresponding to the key. + /// Returns an iterator to the `&str`-values corresponding to the key. 
pub fn values(&self, k: K) -> impl DoubleEndedIterator where K: Borrow, { - Parameters::values(self.as_str(), k.borrow()) + match self.get(k) { + Some(v) => v.split(VALUE_SEPARATOR), + None => { + let mut i = "".split(VALUE_SEPARATOR); + i.next(); + i + } + } } /// Returns an iterator on the key-value pairs as `(&str, &str)`. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - Parameters::iter(self.as_str()) + self.as_str() + .split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) } /// Inserts a key-value pair into the map. @@ -100,10 +125,17 @@ impl Properties<'_> { K: Borrow, V: Borrow, { - let (inner, removed) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); - let removed = removed.map(|s| s.to_string()); - self.0 = Cow::Owned(inner); - removed + let item = self + .iter() + .find(|(key, _)| *key == k.borrow()) + .map(|(_, v)| v.to_string()); + + let current = self.iter().filter(|x| x.0 != k.borrow()); + let new = Some((k.borrow(), v.borrow())).into_iter(); + let iter = current.chain(new); + + *self = Self::from_iter(iter); + item } /// Removes a key from the map, returning the value at the key if the key was previously in the properties. @@ -111,23 +143,35 @@ impl Properties<'_> { where K: Borrow, { - let (inner, removed) = Parameters::remove(self.iter(), k.borrow()); - let removed = removed.map(|s| s.to_string()); - self.0 = Cow::Owned(inner); - removed + let item = self + .iter() + .find(|(key, _)| *key == k.borrow()) + .map(|(_, v)| v.to_string()); + let iter = self.iter().filter(|x| x.0 != k.borrow()); + + *self = Self::from_iter(iter); + item } - /// Join an iterator of key-value pairs `(&str, &str)` into properties. - pub fn join<'s, I, K, V>(&mut self, iter: I) + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Properties) { + self.extend_from_iter(other.iter()); + } + + /// Extend these properties from an iterator. 
+ pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) where I: Iterator + Clone, K: Borrow + 's + ?Sized, V: Borrow + 's + ?Sized, { - self.0 = Cow::Owned(Parameters::join( - Parameters::iter(self.as_str()), - iter.map(|(k, v)| (k.borrow(), v.borrow())), - )); + let new: I = iter.clone(); + let current = self + .iter() + .filter(|(kc, _)| !new.clone().any(|(kn, _)| *kc == kn.borrow())); + let iter = current.chain(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + + *self = Self::from_iter(iter); } /// Convert these properties into owned properties. @@ -170,7 +214,29 @@ where V: Borrow + 's + ?Sized, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); + fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + let mut first = true; + for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + into + } + + let iter = iter.into_iter(); + let inner = concat(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + Self(Cow::Owned(inner)) } } @@ -181,8 +247,7 @@ where V: Borrow + 's, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); - Self(Cow::Owned(inner)) + Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) } } @@ -210,25 +275,21 @@ where #[cfg(feature = "std")] impl<'s> From<&'s Properties<'s>> for HashMap<&'s str, &'s str> { fn from(props: &'s Properties<'s>) -> Self { - HashMap::from_iter(Parameters::iter(props.as_str())) + HashMap::from_iter(props.iter()) } } #[cfg(feature = "std")] impl From<&Properties<'_>> for HashMap { fn from(props: &Properties<'_>) -> Self { - HashMap::from_iter( - Parameters::iter(props.as_str()).map(|(k, v)| (k.to_string(), v.to_string())), - ) + 
HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) } } #[cfg(feature = "std")] impl<'s> From<&'s Properties<'s>> for HashMap, Cow<'s, str>> { fn from(props: &'s Properties<'s>) -> Self { - HashMap::from_iter( - Parameters::iter(props.as_str()).map(|(k, v)| (Cow::from(k), Cow::from(v))), - ) + HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) } } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 893bfdb8a3..3e367c6864 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -388,7 +388,7 @@ fn selector_accessors() { let hm: HashMap = HashMap::from(selector.parameters()); assert!(hm.contains_key(TIME_RANGE_KEY)); - selector.parameters_mut().join(hm.iter()); + selector.parameters_mut().extend_from_iter(hm.iter()); assert_eq!(selector.parameters().get("_filter").unwrap(), ""); selector.set_accept_any_keyexpr(true); From 9a8c2102f2e5a595f0c41cc2c08711013f17d7e9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 17:00:46 +0200 Subject: [PATCH 228/598] Remove #[non_exhaustive] for Properties --- commons/zenoh-protocol/src/core/properties.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 88097debdc..bd00507509 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -61,7 +61,6 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); /// assert_eq!(p, pi); /// ``` -#[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] pub struct Properties<'s>(Cow<'s, str>); From fef7148293cc0992ffa69ae9cfc8a3b3e598fbed Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 18:02:51 +0200 Subject: [PATCH 229/598] Add OrderedProperties --- commons/zenoh-protocol/src/core/properties.rs | 213 ++++++++++++++++++ 1 file changed, 213 
insertions(+) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index bd00507509..2e323925fc 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -177,6 +177,18 @@ impl Properties<'_> { pub fn into_owned(self) -> Properties<'static> { Properties(Cow::Owned(self.0.into_owned())) } + + /// Returns true if all keys are sorted in alphabetical order. + pub fn is_ordered(&self) -> bool { + let mut prev = None; + for (k, _) in self.iter() { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } } impl<'s> From<&'s str> for Properties<'s> { @@ -311,6 +323,207 @@ impl fmt::Debug for Properties<'_> { } } +#[derive(Clone, PartialEq, Eq, Default)] +pub struct OrderedProperties<'s>(Properties<'s>); + +impl OrderedProperties<'_> { + /// Returns `true` if properties does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns properties as [`str`]. + pub fn as_str(&self) -> &str { + self.0.as_str() + } + + /// Returns `true` if properties contains the specified key. + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + self.0.contains_key(k) + } + + /// Returns a reference to the `&str`-value corresponding to the key. + pub fn get(&self, k: K) -> Option<&str> + where + K: Borrow, + { + self.0.get(k) + } + + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + self.0.values(k) + } + + /// Returns an iterator on the key-value pairs as `(&str, &str)`. + pub fn iter(&self) -> impl DoubleEndedIterator + Clone { + self.0.iter() + } + + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. 
+ pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + self.0.remove(k) + } + + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let item = self.0.insert(k, v); + self.order(); + item + } + + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Properties) { + self.extend_from_iter(other.iter()); + } + + /// Extend these properties from an iterator. + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) + where + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, + { + self.0.extend_from_iter(iter); + self.order(); + } + + /// Convert these properties into owned properties. + pub fn into_owned(self) -> OrderedProperties<'static> { + OrderedProperties(self.0.into_owned()) + } + + fn order(&mut self) { + if !self.0.is_ordered() { + let mut from = self.0.iter().collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + self.0 = Properties::from_iter(from); + } + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Properties<'s>) -> Self { + let mut props = Self(value); + props.order(); + props + } +} + +impl<'s> From<&'s str> for OrderedProperties<'s> { + fn from(value: &'s str) -> Self { + Self::from(Properties::from(value)) + } +} + +impl From for OrderedProperties<'_> { + fn from(value: String) -> Self { + Self::from(Properties::from(value)) + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Cow<'s, str>) -> Self { + Self::from(Properties::from(value)) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Properties::from_iter(iter)) + } +} + +impl<'s, K, V> 
FromIterator<&'s (K, V)> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Properties::from_iter(iter)) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for OrderedProperties<'_> +where + K: Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From<&OrderedProperties<'_>> for HashMap { + fn from(props: &OrderedProperties<'_>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: OrderedProperties) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + #[cfg(test)] mod tests { use super::*; From 587a7cdbed170d5a1116eaf0a52f2a49da31fd08 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 18:29:02 +0200 Subject: [PATCH 230/598] Revert Properties depend on Parameters --- commons/zenoh-protocol/src/core/mod.rs | 1 - commons/zenoh-protocol/src/core/parameters.rs | 68 ++++---- commons/zenoh-protocol/src/core/properties.rs | 145 +++++------------- 3 files changed, 72 insertions(+), 142 deletions(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs 
b/commons/zenoh-protocol/src/core/mod.rs index 0920d55d01..1652d6bdad 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -54,7 +54,6 @@ pub mod resolution; pub use resolution::*; pub mod parameters; -pub use parameters::*; pub mod properties; pub use properties::*; diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index b49ee1a1f9..59aaa54e28 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -pub const LIST_SEPARATOR: char = ';'; -pub const FIELD_SEPARATOR: char = '='; -pub const VALUE_SEPARATOR: char = '|'; +pub(super) const LIST_SEPARATOR: char = ';'; +pub(super) const FIELD_SEPARATOR: char = '='; +pub(super) const VALUE_SEPARATOR: char = '|'; use alloc::{string::String, vec::Vec}; @@ -29,11 +29,11 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. /// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. 
-pub struct SortedParameters; +pub(super) struct SortedParameters; impl SortedParameters { #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + pub(super) fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -42,7 +42,7 @@ impl SortedParameters { into } - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { @@ -51,7 +51,7 @@ impl SortedParameters { Parameters::from_iter_into(from.iter().copied(), into); } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -64,7 +64,7 @@ impl SortedParameters { (SortedParameters::from_iter(iter), item) } - pub fn join<'s, C, N>(current: C, new: N) -> String + pub(super) fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -74,7 +74,7 @@ impl SortedParameters { into } - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -86,34 +86,20 @@ impl SortedParameters { let iter = current.chain(new); SortedParameters::from_iter_into(iter, into); } - - pub fn is_sorted<'s, I>(iter: I) -> bool - where - I: Iterator, - { - let mut prev = None; - for (k, _) in iter { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. 
-pub struct Parameters; +pub(super) struct Parameters; impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + pub(super) fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR) .filter(|p| !p.is_empty()) .map(|p| split_once(p, FIELD_SEPARATOR)) } #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + pub(super) fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -122,20 +108,20 @@ impl Parameters { into } - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { Self::concat_into(iter, into); } - pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + pub(super) fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { Self::iter(s) .find(|(key, _)| *key == k) .map(|(_, value)| value) } - pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + pub(super) fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { match Self::get(s, k) { Some(v) => v.split(VALUE_SEPARATOR), None => { @@ -146,7 +132,7 @@ impl Parameters { } } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -159,7 +145,7 @@ impl Parameters { (Parameters::from_iter(iter), item) } - pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) + pub(super) fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) where I: Iterator, { @@ -168,7 +154,7 @@ impl Parameters { (Parameters::concat(iter), item) } - pub fn join<'s, C, N>(current: C, new: N) -> String + pub(super) fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -178,7 +164,7 @@ impl Parameters { into } - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub(super) fn join_into<'s, C, N>(current: C, 
new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -218,8 +204,22 @@ impl Parameters { } } + pub(super) fn is_ordered<'s, I>(iter: I) -> bool + where + I: Iterator, + { + let mut prev = None; + for (k, _) in iter { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } + #[cfg(feature = "test")] - pub fn rand(into: &mut String) { + pub(super) fn rand(into: &mut String) { use rand::{ distributions::{Alphanumeric, DistString}, Rng, diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 2e323925fc..e2f11e8814 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,25 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::parameters::{ + Parameters, SortedParameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR, +}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; -const LIST_SEPARATOR: char = ';'; -const FIELD_SEPARATOR: char = '='; -const VALUE_SEPARATOR: char = '|'; - -fn split_once(s: &str, c: char) -> (&str, &str) { - match s.find(c) { - Some(index) => { - let (l, r) = s.split_at(index); - (l, &r[1..]) - } - None => (s, ""), - } -} - /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. @@ -61,17 +50,17 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); /// assert_eq!(p, pi); /// ``` -#[derive(Clone, PartialEq, Eq, Default)] +#[derive(Clone, PartialEq, Eq, Hash, Default)] pub struct Properties<'s>(Cow<'s, str>); -impl Properties<'_> { +impl<'s> Properties<'s> { /// Returns `true` if properties does not contain anything. 
pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns properties as [`str`]. - pub fn as_str(&self) -> &str { + pub fn as_str(&'s self) -> &'s str { &self.0 } @@ -80,40 +69,28 @@ impl Properties<'_> { where K: Borrow, { - self.get(k).is_some() + Parameters::get(self.as_str(), k.borrow()).is_some() } /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&self, k: K) -> Option<&str> + pub fn get(&'s self, k: K) -> Option<&'s str> where K: Borrow, { - self.iter() - .find(|(key, _)| *key == k.borrow()) - .map(|(_, value)| value) + Parameters::get(self.as_str(), k.borrow()) } /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&self, k: K) -> impl DoubleEndedIterator + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator where K: Borrow, { - match self.get(k) { - Some(v) => v.split(VALUE_SEPARATOR), - None => { - let mut i = "".split(VALUE_SEPARATOR); - i.next(); - i - } - } + Parameters::values(self.as_str(), k.borrow()) } /// Returns an iterator on the key-value pairs as `(&str, &str)`. - pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.as_str() - .split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + Parameters::iter(self.as_str()) } /// Inserts a key-value pair into the map. 
@@ -124,16 +101,9 @@ impl Properties<'_> { K: Borrow, V: Borrow, { - let item = self - .iter() - .find(|(key, _)| *key == k.borrow()) - .map(|(_, v)| v.to_string()); - - let current = self.iter().filter(|x| x.0 != k.borrow()); - let new = Some((k.borrow(), v.borrow())).into_iter(); - let iter = current.chain(new); - - *self = Self::from_iter(iter); + let (inner, item) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); item } @@ -142,13 +112,9 @@ impl Properties<'_> { where K: Borrow, { - let item = self - .iter() - .find(|(key, _)| *key == k.borrow()) - .map(|(_, v)| v.to_string()); - let iter = self.iter().filter(|x| x.0 != k.borrow()); - - *self = Self::from_iter(iter); + let (inner, item) = Parameters::remove(self.iter(), k.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); item } @@ -158,19 +124,14 @@ impl Properties<'_> { } /// Extend these properties from an iterator. - pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) where - I: Iterator + Clone, - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, { - let new: I = iter.clone(); - let current = self - .iter() - .filter(|(kc, _)| !new.clone().any(|(kn, _)| *kc == kn.borrow())); - let iter = current.chain(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - - *self = Self::from_iter(iter); + let inner = Parameters::join(self.iter(), iter.map(|(k, v)| (k.borrow(), v.borrow()))); + self.0 = Cow::Owned(inner); } /// Convert these properties into owned properties. @@ -180,14 +141,7 @@ impl Properties<'_> { /// Returns true if all keys are sorted in alphabetical order. 
pub fn is_ordered(&self) -> bool { - let mut prev = None; - for (k, _) in self.iter() { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true + Parameters::is_ordered(self.iter()) } } @@ -225,29 +179,8 @@ where V: Borrow + 's + ?Sized, { fn from_iter>(iter: T) -> Self { - fn concat<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - let mut first = true; - for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - into - } - let iter = iter.into_iter(); - let inner = concat(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - + let inner = Parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } @@ -323,17 +256,17 @@ impl fmt::Debug for Properties<'_> { } } -#[derive(Clone, PartialEq, Eq, Default)] +#[derive(Clone, PartialEq, Eq, Hash, Default)] pub struct OrderedProperties<'s>(Properties<'s>); -impl OrderedProperties<'_> { +impl<'s> OrderedProperties<'s> { /// Returns `true` if properties does not contain anything. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns properties as [`str`]. - pub fn as_str(&self) -> &str { + pub fn as_str(&'s self) -> &'s str { self.0.as_str() } @@ -346,7 +279,7 @@ impl OrderedProperties<'_> { } /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&self, k: K) -> Option<&str> + pub fn get(&'s self, k: K) -> Option<&'s str> where K: Borrow, { @@ -354,7 +287,7 @@ impl OrderedProperties<'_> { } /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&self, k: K) -> impl DoubleEndedIterator + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator where K: Borrow, { @@ -362,7 +295,7 @@ impl OrderedProperties<'_> { } /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
- pub fn iter(&self) -> impl DoubleEndedIterator + Clone { + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { self.0.iter() } @@ -393,11 +326,11 @@ impl OrderedProperties<'_> { } /// Extend these properties from an iterator. - pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) where - I: Iterator + Clone, - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, { self.0.extend_from_iter(iter); self.order(); @@ -410,9 +343,7 @@ impl OrderedProperties<'_> { fn order(&mut self) { if !self.0.is_ordered() { - let mut from = self.0.iter().collect::>(); - from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - self.0 = Properties::from_iter(from); + self.0 = Properties(Cow::Owned(SortedParameters::from_iter(self.iter()))); } } } From 7539346ae07b1063402ba7ee29fd39089cc4c097 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 18:31:13 +0200 Subject: [PATCH 231/598] Revert Properties depend on Parameters --- commons/zenoh-protocol/src/core/mod.rs | 1 + commons/zenoh-protocol/src/core/parameters.rs | 36 +++++++++---------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 1652d6bdad..0920d55d01 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -54,6 +54,7 @@ pub mod resolution; pub use resolution::*; pub mod parameters; +pub use parameters::*; pub mod properties; pub use properties::*; diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 59aaa54e28..2ce430661c 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -29,11 +29,11 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// Parameters provides an `HashMap<&str, &str>`-like view over a 
`&str` when `&str` follows the format `a=b;c=d|e;f=g`. /// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. -pub(super) struct SortedParameters; +pub struct SortedParameters; impl SortedParameters { #[allow(clippy::should_implement_trait)] - pub(super) fn from_iter<'s, I>(iter: I) -> String + pub fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -42,7 +42,7 @@ impl SortedParameters { into } - pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { @@ -51,7 +51,7 @@ impl SortedParameters { Parameters::from_iter_into(from.iter().copied(), into); } - pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -64,7 +64,7 @@ impl SortedParameters { (SortedParameters::from_iter(iter), item) } - pub(super) fn join<'s, C, N>(current: C, new: N) -> String + pub fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -74,7 +74,7 @@ impl SortedParameters { into } - pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -89,17 +89,17 @@ impl SortedParameters { } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. 
-pub(super) struct Parameters; +pub struct Parameters; impl Parameters { - pub(super) fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR) .filter(|p| !p.is_empty()) .map(|p| split_once(p, FIELD_SEPARATOR)) } #[allow(clippy::should_implement_trait)] - pub(super) fn from_iter<'s, I>(iter: I) -> String + pub fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -108,20 +108,20 @@ impl Parameters { into } - pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { Self::concat_into(iter, into); } - pub(super) fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { Self::iter(s) .find(|(key, _)| *key == k) .map(|(_, value)| value) } - pub(super) fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { match Self::get(s, k) { Some(v) => v.split(VALUE_SEPARATOR), None => { @@ -132,7 +132,7 @@ impl Parameters { } } - pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -145,7 +145,7 @@ impl Parameters { (Parameters::from_iter(iter), item) } - pub(super) fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) + pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) where I: Iterator, { @@ -154,7 +154,7 @@ impl Parameters { (Parameters::concat(iter), item) } - pub(super) fn join<'s, C, N>(current: C, new: N) -> String + pub fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -164,7 +164,7 @@ impl Parameters { into } - pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub fn join_into<'s, C, N>(current: C, 
new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -204,7 +204,7 @@ impl Parameters { } } - pub(super) fn is_ordered<'s, I>(iter: I) -> bool + pub fn is_ordered<'s, I>(iter: I) -> bool where I: Iterator, { @@ -219,7 +219,7 @@ impl Parameters { } #[cfg(feature = "test")] - pub(super) fn rand(into: &mut String) { + pub fn rand(into: &mut String) { use rand::{ distributions::{Alphanumeric, DistString}, Rng, From 28d32a6bf2a35a86f4b3969865f033259174705f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 19:53:35 +0200 Subject: [PATCH 232/598] Remove SortedParameters --- commons/zenoh-protocol/src/core/endpoint.rs | 25 ++- commons/zenoh-protocol/src/core/parameters.rs | 169 +++++++++--------- commons/zenoh-protocol/src/core/properties.rs | 14 +- 3 files changed, 103 insertions(+), 105 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 3397147369..5c7cb891ae 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,10 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - locator::*, - parameters::{Parameters, SortedParameters}, -}; +use super::{locator::*, parameters::Parameters}; use alloc::{borrow::ToOwned, format, string::String}; use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; @@ -251,7 +248,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - SortedParameters::join( + Parameters::join_sort( self.0.metadata().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ), @@ -270,7 +267,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - SortedParameters::insert(self.0.metadata().iter(), k.borrow(), v.borrow()).0, + Parameters::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0, self.0.config(), )?; @@ -285,7 +282,7 @@ impl 
MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().iter(), k.borrow()).0, + Parameters::remove(self.0.metadata().as_str(), k.borrow()).0, self.0.config(), )?; @@ -382,7 +379,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - SortedParameters::join( + Parameters::join_sort( self.0.config().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ), @@ -401,7 +398,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - SortedParameters::insert(self.0.config().iter(), k.borrow(), v.borrow()).0, + Parameters::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0, )?; self.0.inner = ep.inner; @@ -416,7 +413,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().iter(), k.borrow()).0, + Parameters::remove(self.0.config().as_str(), k.borrow()).0, )?; self.0.inner = ep.inner; @@ -578,14 +575,14 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -598,10 +595,10 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - 
SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); inner.push(CONFIG_SEPARATOR); - SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 2ce430661c..adf744130b 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -28,52 +28,73 @@ fn split_once(s: &str, c: char) -> (&str, &str) { } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -/// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. -pub struct SortedParameters; +/// +/// `;` is the separator between the key-value `(&str, &str)` elements. +/// +/// `=` is the separator between the `&str`-key and `&str`-value +/// +/// `|` is the separator between multiple elements of the values. +pub struct Parameters; -impl SortedParameters { +impl Parameters { + /// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format. + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + s.split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) + } + + /// Builds a string from an iterator preserving the order. #[allow(clippy::should_implement_trait)] pub fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { let mut into = String::new(); - Self::from_iter_into(iter, &mut into); + Parameters::from_iter_into(iter, &mut into); into } + /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. 
pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { - let mut from = iter.collect::>(); - from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Parameters::from_iter_into(from.iter().copied(), into); + Parameters::concat_into(iter, into); } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + /// Same as [`Self::from_iter`] but keys are sorted in alphabetical order. + pub fn from_iter_sort<'s, I>(iter: I) -> String where - I: Iterator + Clone, + I: Iterator, { - let mut ic = iter.clone(); - let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); + let mut into = String::new(); + Parameters::from_iter_into(iter, &mut into); + into + } - let current = iter.filter(|x| x.0 != k); - let new = Some((k, v)).into_iter(); - let iter = current.chain(new); - (SortedParameters::from_iter(iter), item) + /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. + pub fn from_iter_sort_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut from = iter.collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + Parameters::from_iter_into(from.iter().copied(), into); } + /// Builds a string by joining two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. pub fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, { let mut into = String::new(); - SortedParameters::join_into(current, new, &mut into); + Parameters::join_into(current, new, &mut into); into } + /// Same as [`Self::join`] but it writes into a user-provided string instead of allocating a new one. 
pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, @@ -84,45 +105,44 @@ impl SortedParameters { .clone() .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); let iter = current.chain(new); - SortedParameters::from_iter_into(iter, into); - } -} - -/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -pub struct Parameters; - -impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) + Parameters::from_iter_into(iter, into); } - #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + /// Same as [`Self::join`] but keys are sorted in alphabetical order. + pub fn join_sort<'s, C, N>(current: C, new: N) -> String where - I: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { let mut into = String::new(); - Self::from_iter_into(iter, &mut into); + Parameters::join_sort_into(current, new, &mut into); into } - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + /// Same as [`Self::join_into`] but keys are sorted in alphabetical order. + pub fn join_sort_into<'s, C, N>(current: C, new: N, into: &mut String) where - I: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { - Self::concat_into(iter, into); + let n = new.clone(); + let current = current + .clone() + .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + let iter = current.chain(new); + Parameters::from_iter_into(iter, into); } + /// Get the `&str`-value for a `&str`-key according to the parameters format. pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Self::iter(s) + Parameters::iter(s) .find(|(key, _)| *key == k) .map(|(_, value)| value) } + /// Get the `&str`-value iterator for a `&str`-key according to the parameters format. 
pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { - match Self::get(s, k) { + match Parameters::get(s, k) { Some(v) => v.split(VALUE_SEPARATOR), None => { let mut i = "".split(VALUE_SEPARATOR); @@ -132,49 +152,46 @@ impl Parameters { } } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) - where - I: Iterator + Clone, - { - let mut ic = iter.clone(); - let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); + /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. + pub fn insert<'s>(s: &'s str, k: &str, v: &str) -> (String, Option<&'s str>) { + let mut iter = Parameters::iter(s); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let current = iter.filter(|x| x.0 != k); + let current = Parameters::iter(s).filter(|x| x.0 != k); let new = Some((k, v)).into_iter(); let iter = current.chain(new); (Parameters::from_iter(iter), item) } - pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) - where - I: Iterator, - { + /// Same as [`Self::insert`] but keys are sorted in alphabetical order. + pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let mut iter = Parameters::iter(s); let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let iter = iter.filter(|x| x.0 != k); - (Parameters::concat(iter), item) + + let current = Parameters::iter(s).filter(|x| x.0 != k); + let new = Some((k, v)).into_iter(); + let iter = current.chain(new); + (Parameters::from_iter_sort(iter), item) } - pub fn join<'s, C, N>(current: C, new: N) -> String - where - C: Iterator + Clone, - N: Iterator + Clone, - { - let mut into = String::new(); - Parameters::join_into(current, new, &mut into); - into + /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. 
+ pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) { + let mut iter = Parameters::iter(s); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let iter = iter.filter(|x| x.0 != k); + (Parameters::concat(iter), item) } - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) - where - C: Iterator + Clone, - N: Iterator + Clone, - { - let n = new.clone(); - let current = current - .clone() - .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - let iter = current.chain(new); - Parameters::from_iter_into(iter, into); + /// Returns `true` if all keys are sorted in alphabetical order + pub fn is_ordered(s: &str) -> bool { + let mut prev = None; + for (k, _) in Parameters::iter(s) { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true } fn concat<'s, I>(iter: I) -> String @@ -204,20 +221,6 @@ impl Parameters { } } - pub fn is_ordered<'s, I>(iter: I) -> bool - where - I: Iterator, - { - let mut prev = None; - for (k, _) in iter { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } - #[cfg(feature = "test")] pub fn rand(into: &mut String) { use rand::{ diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index e2f11e8814..2611d2d7a9 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,9 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::parameters::{ - Parameters, SortedParameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR, -}; +use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] @@ -101,7 +99,7 @@ impl<'s> Properties<'s> { K: Borrow, V: Borrow, { - let (inner, item) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); + let (inner, item) = Parameters::insert(self.as_str(), 
k.borrow(), v.borrow()); let item = item.map(|i| i.to_string()); self.0 = Cow::Owned(inner); item @@ -112,7 +110,7 @@ impl<'s> Properties<'s> { where K: Borrow, { - let (inner, item) = Parameters::remove(self.iter(), k.borrow()); + let (inner, item) = Parameters::remove(self.as_str(), k.borrow()); let item = item.map(|i| i.to_string()); self.0 = Cow::Owned(inner); item @@ -139,9 +137,9 @@ impl<'s> Properties<'s> { Properties(Cow::Owned(self.0.into_owned())) } - /// Returns true if all keys are sorted in alphabetical order. + /// Returns `true` if all keys are sorted in alphabetical order. pub fn is_ordered(&self) -> bool { - Parameters::is_ordered(self.iter()) + Parameters::is_ordered(self.as_str()) } } @@ -343,7 +341,7 @@ impl<'s> OrderedProperties<'s> { fn order(&mut self) { if !self.0.is_ordered() { - self.0 = Properties(Cow::Owned(SortedParameters::from_iter(self.iter()))); + self.0 = Properties(Cow::Owned(Parameters::from_iter_sort(self.iter()))); } } } From 8e20b019d5e4eec709915b84b02bd569474d476b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 19:56:31 +0200 Subject: [PATCH 233/598] Rename config and metadata join functions to extend_from_iter --- commons/zenoh-protocol/src/core/endpoint.rs | 8 ++++---- io/zenoh-transport/src/multicast/manager.rs | 4 +++- io/zenoh-transport/src/unicast/manager.rs | 8 ++++++-- io/zenoh-transport/tests/endpoints.rs | 4 ++-- .../tests/unicast_authenticator.rs | 4 ++-- io/zenoh-transport/tests/unicast_multilink.rs | 4 ++-- io/zenoh-transport/tests/unicast_openclose.rs | 4 ++-- io/zenoh-transport/tests/unicast_time.rs | 4 ++-- io/zenoh-transport/tests/unicast_transport.rs | 16 ++++++++-------- 9 files changed, 31 insertions(+), 25 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 5c7cb891ae..735e329146 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -239,7 +239,7 @@ impl<'a> 
MetadataMut<'a> { } impl MetadataMut<'_> { - pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where I: Iterator + Clone, K: Borrow + 's + ?Sized, @@ -369,7 +369,7 @@ impl<'a> ConfigMut<'a> { } impl ConfigMut<'_> { - pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where I: Iterator + Clone, K: Borrow + 's + ?Sized, @@ -817,14 +817,14 @@ fn endpoints() { let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .metadata_mut() - .join([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) + .extend_from_iter([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447?a=1;b=2;c=3"); let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .config_mut() - .join([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) + .extend_from_iter([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447#A=1;B=2;C=3"); diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 421664e954..173475cf55 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -261,7 +261,9 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().join(Parameters::iter(config))?; + endpoint + .config_mut() + .extend_from_iter(Parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index ab31376788..eec9d05386 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -379,7 +379,9 @@ impl TransportManager { .await?; // Fill 
and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().join(Parameters::iter(config))?; + endpoint + .config_mut() + .extend_from_iter(Parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -688,7 +690,9 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().join(Parameters::iter(config))?; + endpoint + .config_mut() + .extend_from_iter(Parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index def493e88f..50b2b80ff0 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -317,7 +317,7 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("tls/localhost:{}", 7070).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -396,7 +396,7 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("quic/localhost:{}", 7080).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index 63f1c785b7..0f31ef2453 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -802,7 +802,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 8030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), 
@@ -902,7 +902,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 8040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 54a31f62c3..99eb651b75 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -611,7 +611,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -709,7 +709,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 3f57ebfd62..4071ab5c1d 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -639,7 +639,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -737,7 +737,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 668df34cd6..12b29be1b5 100644 --- 
a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -398,7 +398,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -497,7 +497,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 2fffb2f811..0b150c0feb 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -994,7 +994,7 @@ async fn transport_unicast_tls_only_server() { let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1039,7 +1039,7 @@ async fn transport_unicast_quic_only_server() { let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1087,7 +1087,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut client_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); client_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_CLIENT_CERTIFICATE_RAW, CLIENT_CERT), @@ -1103,7 +1103,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut server_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); server_endpoint .config_mut() - .join( + .extend_from_iter( [ 
(TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1157,14 +1157,14 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); client_endpoint .config_mut() - .join([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) + .extend_from_iter([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) .unwrap(); // Define the locator let mut server_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); server_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1223,7 +1223,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); client_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has @@ -1243,7 +1243,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut server_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); server_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), From eed331e21bb09a6e52e82a768d683aa35e29d338 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 20:10:50 +0200 Subject: [PATCH 234/598] Selector new --- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh/src/selector.rs | 36 +++++++++++++++++++++------- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index fc74ca5421..06341e46a7 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -403,7 +403,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { pub 
const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { + /// Builds a new selector + pub fn new(key_expr: K, parameters: P) -> Self + where + K: Into>, + P: Into>, + { + Self { + key_expr: key_expr.into(), + parameters: parameters.into(), + } + } + /// Gets the parameters. pub fn parameters(&self) -> &Parameters { &self.parameters @@ -85,11 +97,20 @@ impl<'a> Selector<'a> { &mut self.parameters } + /// Sets the `parameters` part of this `Selector`. + #[inline(always)] + pub fn set_parameters

(&mut self, parameters: P) + where + P: Into>, + { + self.parameters = parameters.into(); + } + /// Create an owned version of this selector with `'static` lifetime. pub fn into_owned(self) -> Selector<'static> { Selector { key_expr: self.key_expr.into_owned(), - parameters: Parameters(self.parameters.0.into_owned()), + parameters: self.parameters.into_owned(), } } @@ -104,14 +125,6 @@ impl<'a> Selector<'a> { self.parameters_mut().set_time_range(time_range); } - #[zenoh_macros::unstable] - /// Sets the `parameters` part of this `Selector`. - #[inline(always)] - pub fn with_parameters(mut self, parameters: &'a str) -> Self { - self.parameters = parameters.into(); - self - } - #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. /// @@ -192,6 +205,11 @@ impl From> for HashMap { } impl Parameters<'_> { + /// Create an owned version of these parameters with `'static` lifetime. + pub fn into_owned(self) -> Parameters<'static> { + Parameters(self.0.into_owned()) + } + #[zenoh_macros::unstable] /// Sets the time range targeted by the selector. 
pub fn set_time_range>>(&mut self, time_range: T) { From 9033e69aa49e531dad24cf430c95d696e4dceb8e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 21:18:34 +0200 Subject: [PATCH 235/598] Improve parameters --- commons/zenoh-protocol/src/core/endpoint.rs | 28 +++-- commons/zenoh-protocol/src/core/parameters.rs | 109 ++++++------------ commons/zenoh-protocol/src/core/properties.rs | 9 +- 3 files changed, 61 insertions(+), 85 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 735e329146..a61fdd8e89 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -248,10 +248,10 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::join_sort( + Parameters::from_iter(Parameters::sort(Parameters::join( self.0.metadata().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), - ), + ))), self.0.config(), )?; @@ -379,10 +379,10 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::join_sort( + Parameters::from_iter(Parameters::sort(Parameters::join( self.0.config().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), - ), + ))), )?; self.0.inner = ep.inner; @@ -575,14 +575,20 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..]), &mut inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[midx + 1..])), + &mut inner, + ); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut 
inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[cidx + 1..])), + &mut inner, + ); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -595,10 +601,16 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[midx + 1..cidx])), + &mut inner, + ); inner.push(CONFIG_SEPARATOR); - Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[cidx + 1..])), + &mut inner, + ); Ok(EndPoint { inner }) } diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index adf744130b..e4f815feff 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -44,93 +44,46 @@ impl Parameters { .map(|p| split_once(p, FIELD_SEPARATOR)) } - /// Builds a string from an iterator preserving the order. - #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - Parameters::from_iter_into(iter, &mut into); - into - } - - /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) - where - I: Iterator, - { - Parameters::concat_into(iter, into); - } - - /// Same as [`Self::from_iter`] but keys are sorted in alphabetical order. - pub fn from_iter_sort<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - Parameters::from_iter_into(iter, &mut into); - into - } - /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. 
- pub fn from_iter_sort_into<'s, I>(iter: I, into: &mut String) + pub fn sort<'s, I>(iter: I) -> impl Iterator where I: Iterator, { let mut from = iter.collect::>(); from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Parameters::from_iter_into(from.iter().copied(), into); + from.into_iter() } - /// Builds a string by joining two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. - pub fn join<'s, C, N>(current: C, new: N) -> String + /// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. + pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator + Clone where C: Iterator + Clone, - N: Iterator + Clone, - { - let mut into = String::new(); - Parameters::join_into(current, new, &mut into); - into - } - - /// Same as [`Self::join`] but it writes into a user-provided string instead of allocating a new one. - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) - where - C: Iterator + Clone, - N: Iterator + Clone, + N: Iterator + Clone + 's, { let n = new.clone(); let current = current .clone() - .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - let iter = current.chain(new); - Parameters::from_iter_into(iter, into); + .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + current.chain(new) } - /// Same as [`Self::join`] but keys are sorted in alphabetical order. - pub fn join_sort<'s, C, N>(current: C, new: N) -> String + /// Builds a string from an iterator preserving the order. + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String where - C: Iterator + Clone, - N: Iterator + Clone, + I: Iterator, { let mut into = String::new(); - Parameters::join_sort_into(current, new, &mut into); + Parameters::from_iter_into(iter, &mut into); into } - /// Same as [`Self::join_into`] but keys are sorted in alphabetical order. 
- pub fn join_sort_into<'s, C, N>(current: C, new: N, into: &mut String) + /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where - C: Iterator + Clone, - N: Iterator + Clone, + I: Iterator, { - let n = new.clone(); - let current = current - .clone() - .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - let iter = current.chain(new); - Parameters::from_iter_into(iter, into); + Parameters::concat_into(iter, into); } /// Get the a `&str`-value for a `&str`-key according to the parameters format. @@ -152,26 +105,32 @@ impl Parameters { } } - /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. - pub fn insert<'s>(s: &'s str, k: &str, v: &str) -> (String, Option<&'s str>) { - let mut iter = Parameters::iter(s); + fn _insert<'s, I>( + i: I, + k: &'s str, + v: &'s str, + ) -> (impl Iterator, Option<&'s str>) + where + I: Iterator + Clone, + { + let mut iter = i.clone(); let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let current = Parameters::iter(s).filter(|x| x.0 != k); + let current = i.filter(move |x| x.0 != k); let new = Some((k, v)).into_iter(); - let iter = current.chain(new); + (current.chain(new), item) + } + + /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. + pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let (iter, item) = Parameters::_insert(Parameters::iter(s), k, v); (Parameters::from_iter(iter), item) } /// Same as [`Self::insert`] but keys are sorted in alphabetical order. 
pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { - let mut iter = Parameters::iter(s); - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - - let current = Parameters::iter(s).filter(|x| x.0 != k); - let new = Some((k, v)).into_iter(); - let iter = current.chain(new); - (Parameters::from_iter_sort(iter), item) + let (iter, item) = Parameters::_insert(Parameters::iter(s), k, v); + (Parameters::from_iter(Parameters::sort(iter)), item) } /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 2611d2d7a9..3c83d00e75 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -128,7 +128,10 @@ impl<'s> Properties<'s> { K: Borrow + 'e + ?Sized, V: Borrow + 'e + ?Sized, { - let inner = Parameters::join(self.iter(), iter.map(|(k, v)| (k.borrow(), v.borrow()))); + let inner = Parameters::from_iter(Parameters::join( + self.iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + )); self.0 = Cow::Owned(inner); } @@ -341,7 +344,9 @@ impl<'s> OrderedProperties<'s> { fn order(&mut self) { if !self.0.is_ordered() { - self.0 = Properties(Cow::Owned(Parameters::from_iter_sort(self.iter()))); + self.0 = Properties(Cow::Owned(Parameters::from_iter(Parameters::sort( + self.iter(), + )))); } } } From 0bc257dab138223769ee267d5a46b9691aec1cda Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 21:19:25 +0200 Subject: [PATCH 236/598] Fix no_std CI --- commons/zenoh-protocol/src/core/properties.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 3c83d00e75..2fa71ec93f 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh 
Team, // use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; -use alloc::borrow::Cow; +use alloc::{borrow::Cow, string::String}; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; From 2bf499db30e792ca13ece596cbfbb5ac22c102dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 21:40:04 +0200 Subject: [PATCH 237/598] Fix no_std CI --- commons/zenoh-protocol/src/core/properties.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 2fa71ec93f..67a1edba7e 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -12,7 +12,10 @@ // ZettaScale Zenoh Team, // use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; -use alloc::{borrow::Cow, string::String}; +use alloc::{ + borrow::Cow, + string::{String, ToString}, +}; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; From 6d531588ac58bb1daad82047d8f41aa05ded9855 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 22:31:18 +0200 Subject: [PATCH 238/598] Fix doctest --- commons/zenoh-protocol/src/core/properties.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 67a1edba7e..a4c2c35197 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -26,7 +26,7 @@ use std::collections::HashMap; /// /// Example: /// ``` -/// use zenoh_collections::Properties; +/// use zenoh_protocol::core::Properties; /// /// let a = "a=1;b=2;c=3|4|5;d=6"; /// let p = Properties::from(a); From ce9495c80df5d245859dbff25cf2e49a9f4ebba5 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 22:51:11 +0200 Subject: [PATCH 
239/598] Fix unused import --- zenoh/src/selector.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 076b1c0875..659e6695ca 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -25,8 +25,8 @@ use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Properties, }; -use zenoh_result::ZResult; -use zenoh_util::time_range::TimeRange; +#[cfg(feature = "unstable")] +use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange}; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters From 099c77f486f5d63d302fb9e6d4e6b29dc2bb538b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 11:53:30 +0200 Subject: [PATCH 240/598] Fix docstring --- zenoh/src/selector.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 659e6695ca..fe5394ea8e 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -88,15 +88,13 @@ impl<'a> Selector<'a> { pub fn key_expr(&'a self) -> &KeyExpr<'a> { &self.key_expr } - /// Gets the parameters as a raw string. + + /// Gets a reference to selector's [`Parameters`]. pub fn parameters(&self) -> &Parameters<'a> { &self.parameters } - /// Gets a mutable reference to the parameters as a String. - /// - /// Note that calling this function may cause an allocation and copy if the selector's parameters wasn't - /// already owned by `self`. `self` owns its parameters as soon as this function returns. + /// Gets a mutable reference to selector's [`Parameters`]. 
pub fn parameters_mut(&mut self) -> &mut Parameters<'a> { &mut self.parameters } From 131ccdd5e5620a648f5ea1d43480d061e79ec62c Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:55:08 +0200 Subject: [PATCH 241/598] make Reply fields pub(crate) and add accessors (#929) * make Reply fields pub(crate) and add accessors * doctests fix * Rename sample() and try_into_sample() to result() and into_result() * Fix valgrind CI * Fix doctest --------- Co-authored-by: Luca Cominardi --- .../src/queryable_get/bin/z_queryable_get.rs | 2 +- examples/examples/z_get.rs | 2 +- examples/examples/z_get_liveliness.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 14 ++++---- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 2 +- .../tests/operations.rs | 2 +- .../tests/wildcard.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/lib.rs | 2 +- zenoh/src/lib.rs | 2 +- zenoh/src/liveliness.rs | 8 ++--- zenoh/src/query.rs | 35 +++++++++++++++---- zenoh/src/session.rs | 28 +++++++-------- zenoh/tests/attachments.rs | 2 +- zenoh/tests/events.rs | 8 ++--- zenoh/tests/liveliness.rs | 5 ++- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 6 ++-- zenoh/tests/unicity.rs | 2 +- 21 files changed, 78 insertions(+), 54 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index a5111c11e3..80549ead27 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -60,7 +60,7 @@ async fn main() { .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => println!( ">> Received ('{}': '{}')", sample.key_expr().as_str(), diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 77b67b90ed..7295294a00 100644 --- 
a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -41,7 +41,7 @@ async fn main() { .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { let payload = sample .payload() diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 0a15b287c7..49c211f322 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -36,7 +36,7 @@ async fn main() { .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), Err(err) => { let payload = err diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 1068d07163..0c5f639eca 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -92,9 +92,9 @@ fn sample_to_json(sample: &Sample) -> JSONSample { } } -fn result_to_json(sample: Result) -> JSONSample { +fn result_to_json(sample: Result<&Sample, &Value>) -> JSONSample { match sample { - Ok(sample) => sample_to_json(&sample), + Ok(sample) => sample_to_json(sample), Err(err) => JSONSample { key: "ERROR".into(), value: payload_to_json(err.payload(), err.encoding()), @@ -107,7 +107,7 @@ fn result_to_json(sample: Result) -> JSONSample { async fn to_json(results: flume::Receiver) -> String { let values = results .stream() - .filter_map(move |reply| async move { Some(result_to_json(reply.sample)) }) + .filter_map(move |reply| async move { Some(result_to_json(reply.result())) }) .collect::>() .await; @@ -122,7 +122,7 @@ async fn to_json_response(results: flume::Receiver) -> Response { ) } -fn sample_to_html(sample: Sample) -> String { +fn sample_to_html(sample: &Sample) -> String { format!( "

{}
\n
{}
\n", sample.key_expr().as_str(), @@ -133,7 +133,7 @@ fn sample_to_html(sample: Sample) -> String { ) } -fn result_to_html(sample: Result) -> String { +fn result_to_html(sample: Result<&Sample, &Value>) -> String { match sample { Ok(sample) => sample_to_html(sample), Err(err) => { @@ -148,7 +148,7 @@ fn result_to_html(sample: Result) -> String { async fn to_html(results: flume::Receiver) -> String { let values = results .stream() - .filter_map(move |reply| async move { Some(result_to_html(reply.sample)) }) + .filter_map(move |reply| async move { Some(result_to_html(reply.result())) }) .collect::>() .await .join("\n"); @@ -161,7 +161,7 @@ async fn to_html_response(results: flume::Receiver) -> Response { async fn to_raw_response(results: flume::Receiver) -> Response { match results.recv_async().await { - Ok(reply) => match reply.sample { + Ok(reply) => match reply.result() { Ok(sample) => response( StatusCode::Ok, Cow::from(sample.encoding()).as_ref(), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 3a37095f67..bc98f61009 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -231,7 +231,7 @@ impl AlignQueryable { // get corresponding key from log let replies = self.session.get(&logentry.key).res().await.unwrap(); if let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index f33b370200..8b8fe6753a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -332,7 +332,7 @@ impl Aligner { { Ok(replies) => { while let Ok(reply) = 
replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { log::trace!( "[ALIGNER] Received ('{}': '{}')", diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index d2c2984c21..0c49852cfa 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -649,7 +649,7 @@ impl StorageService { } }; while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { self.process_sample(sample).await; } diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 36162f01c2..c82459cdcc 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -50,7 +50,7 @@ async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { - if let Ok(sample) = reply.sample { + if let Ok(sample) = reply.into_result() { samples.push(sample); } } diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 5a71dc23f0..d778eadde4 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -51,7 +51,7 @@ async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { - if let Ok(sample) = reply.sample { + if let Ok(sample) = reply.into_result() { samples.push(sample); } } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 8a7823ed72..bd36652850 100644 --- a/zenoh-ext/src/group.rs +++ 
b/zenoh-ext/src/group.rs @@ -306,7 +306,7 @@ async fn net_event_handler(z: Arc, state: Arc) { let receiver = z.get(&qres).consolidation(qc).res().await.unwrap(); while let Ok(reply) = receiver.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { match bincode::deserialize_from::( sample.payload().reader(), diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7ac880fd8c..80d01d7846 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -61,6 +61,6 @@ pub trait ExtractSample { impl ExtractSample for Reply { fn extract(self) -> ZResult { - self.sample.map_err(|e| zerror!("{:?}", e).into()) + self.into_result().map_err(|e| zerror!("{:?}", e).into()) } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 90b4b2af58..8de143fd8d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -70,7 +70,7 @@ //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let replies = session.get("key/expression").res().await.unwrap(); //! while let Ok(reply) = replies.recv_async().await { -//! println!(">> Received {:?}", reply.sample); +//! println!(">> Received {:?}", reply.result()); //! } //! } //! 
``` diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 0b539ba636..e55b0a90dc 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -174,7 +174,7 @@ impl<'a> Liveliness<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// if let Ok(sample) = reply.sample { + /// if let Ok(sample) = reply.result() { /// println!(">> Liveliness token {}", sample.key_expr()); /// } /// } @@ -606,7 +606,7 @@ where /// .await /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { -/// match token.sample { +/// match token.result() { /// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), /// Err(err) => println!("Received (ERROR: '{:?}')", err.payload()), /// } @@ -635,7 +635,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// let queryable = session /// .liveliness() /// .get("key/expression") - /// .callback(|reply| {println!("Received {:?}", reply.sample);}) + /// .callback(|reply| { println!("Received {:?}", reply.result()); }) /// .res() /// .await /// .unwrap(); @@ -710,7 +710,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!("Received {:?}", reply.sample); + /// println!("Received {:?}", reply.result()); /// } /// # } /// ``` diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index d089290326..901dcd18ae 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -77,10 +77,31 @@ impl Default for QueryConsolidation { #[non_exhaustive] #[derive(Clone, Debug)] pub struct Reply { - /// The result of this Reply. - pub sample: Result, - /// The id of the zenoh instance that answered this Reply. 
- pub replier_id: ZenohId, + pub(crate) result: Result, + pub(crate) replier_id: ZenohId, +} + +impl Reply { + /// Gets a borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. + pub fn result(&self) -> Result<&Sample, &Value> { + self.result.as_ref() + } + + /// Converts this `Reply` into its result. Use [`Reply::result`] if you don't want to take ownership. + pub fn into_result(self) -> Result { + self.result + } + + /// Gets the id of the zenoh instance that answered this Reply. + pub fn replier_id(&self) -> ZenohId { + self.replier_id + } +} + +impl From for Result { + fn from(value: Reply) -> Self { + value.into_result() + } } pub(crate) struct QueryState { @@ -110,7 +131,7 @@ pub(crate) struct QueryState { /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { -/// println!("Received {:?}", reply.sample) +/// println!("Received {:?}", reply.result()) /// } /// # } /// ``` @@ -209,7 +230,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let queryable = session /// .get("key/expression") - /// .callback(|reply| {println!("Received {:?}", reply.sample);}) + /// .callback(|reply| {println!("Received {:?}", reply.result());}) /// .res() /// .await /// .unwrap(); @@ -302,7 +323,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!("Received {:?}", reply.sample); + /// println!("Received {:?}", reply.result()); /// } /// # } /// ``` diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 29ad9c2b00..c73b791a96 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -784,7 +784,7 @@ impl Session { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let replies = session.get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!(">> 
Received {:?}", reply.sample); + /// println!(">> Received {:?}", reply.result()); /// } /// # } /// ``` @@ -1653,7 +1653,7 @@ impl Session { } } (query.callback)(Reply { - sample: Err("Timeout".into()), + result: Err("Timeout".into()), replier_id: zid, }); } @@ -2171,7 +2171,7 @@ impl Primitives for Session { }; let new_reply = Reply { replier_id, - sample: Err(value), + result: Err(value), }; callback(new_reply); } @@ -2292,7 +2292,7 @@ impl Primitives for Session { attachment, ); let new_reply = Reply { - sample: Ok(sample), + result: Ok(sample), replier_id: ZenohId::rand(), // TODO }; let callback = @@ -2302,15 +2302,15 @@ impl Primitives for Session { } ConsolidationMode::Monotonic => { match query.replies.as_ref().unwrap().get( - new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), + new_reply.result.as_ref().unwrap().key_expr.as_keyexpr(), ) { Some(reply) => { - if new_reply.sample.as_ref().unwrap().timestamp - > reply.sample.as_ref().unwrap().timestamp + if new_reply.result.as_ref().unwrap().timestamp + > reply.result.as_ref().unwrap().timestamp { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2326,7 +2326,7 @@ impl Primitives for Session { None => { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2340,15 +2340,15 @@ impl Primitives for Session { } Consolidation::Auto | ConsolidationMode::Latest => { match query.replies.as_ref().unwrap().get( - new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), + new_reply.result.as_ref().unwrap().key_expr.as_keyexpr(), ) { Some(reply) => { - if new_reply.sample.as_ref().unwrap().timestamp - > reply.sample.as_ref().unwrap().timestamp + if new_reply.result.as_ref().unwrap().timestamp + > reply.result.as_ref().unwrap().timestamp { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2361,7 +2361,7 @@ impl Primitives for Session { None => { 
query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 844e2985bc..df9ebcca2e 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -113,7 +113,7 @@ fn attachment_queries() { .res() .unwrap(); while let Ok(reply) = get.recv() { - let response = reply.sample.as_ref().unwrap(); + let response = reply.result().unwrap(); for (k, v) in response.attachment().unwrap().iter::<( [u8; std::mem::size_of::()], [u8; std::mem::size_of::()], diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 201f4941f9..9c807bd121 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -75,8 +75,8 @@ async fn zenoh_events() { .into_iter() .collect(); assert!(replies.len() == 1); - assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); + assert!(replies[0].result().is_ok()); + let key_expr = replies[0].result().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); let replies: Vec = ztimeout!(session @@ -86,8 +86,8 @@ async fn zenoh_events() { .into_iter() .collect(); assert!(replies.len() == 1); - assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); + assert!(replies[0].result().is_ok()); + let key_expr = replies[0].result().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); close_session(session2).await; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0e2870d808..fe6ac99571 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -53,7 +53,10 @@ async fn zenoh_liveliness() { .get("zenoh_liveliness_test") .res_async()) .unwrap(); - let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); + let sample: Sample = ztimeout!(replies.recv_async()) + 
.unwrap() + .into_result() + .unwrap(); assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 56bacd7fdd..1323dc4b08 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -100,7 +100,7 @@ impl Task { replies = session.get(ke).timeout(Duration::from_secs(10)).res() => { let replies = replies?; while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { let recv_size = sample.payload().len(); if recv_size != *expected_size { diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 603ebdac49..ca67c450fd 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -193,7 +193,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let selector = format!("{}?ok_put", key_expr); let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let s = s.sample.unwrap(); + let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Put); assert_eq!(s.payload().len(), size); cnt += 1; @@ -211,7 +211,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let selector = format!("{}?ok_del", key_expr); let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let s = s.sample.unwrap(); + let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Delete); assert_eq!(s.payload().len(), 0); cnt += 1; @@ -229,7 +229,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let selector = format!("{}?err", key_expr); let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let e = s.sample.unwrap_err(); + let e = s.result().unwrap_err(); assert_eq!(e.payload().len(), size); cnt += 1; } diff --git a/zenoh/tests/unicity.rs 
b/zenoh/tests/unicity.rs index f34704fb7e..78eded580c 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -234,7 +234,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { let rs = ztimeout!(s03.get(cke.clone()).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().payload().len(), size); + assert_eq!(s.result().unwrap().payload().len(), size); cnt += 1; } } From 9d25e9838c4d35dcdd98fa3c9b9cf09a7c27e819 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 11:55:28 +0200 Subject: [PATCH 242/598] Remove accept_any_reply from selector --- zenoh/src/query.rs | 3 ++- zenoh/src/selector.rs | 7 +------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 2a94620237..14a30e983a 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -387,7 +387,8 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { selector: self.selector.map(|mut s| { - s.set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); + s.parameters_mut() + .set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); s }), ..self diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index fe5394ea8e..606e2022cd 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -135,11 +135,6 @@ impl<'a> Selector<'a> { self.parameters().time_range() } - #[cfg(any(feature = "unstable", test))] - pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { - self.parameters_mut().set_accept_any_keyexpr(anyke); - } - #[cfg(any(feature = "unstable", test))] pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { self.parameters().accept_any_keyexpr() @@ -411,7 +406,7 @@ fn selector_accessors() { selector.parameters_mut().extend_from_iter(hm.iter()); assert_eq!(selector.parameters().get("_filter").unwrap(), ""); - selector.set_accept_any_keyexpr(true); + 
selector.parameters_mut().set_accept_any_keyexpr(true); println!("Parameters end: {}", selector.parameters()); for i in selector.parameters().iter() { From c8b3345e898d726ff8b3f151ae87c8595642d000 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 11:59:20 +0200 Subject: [PATCH 243/598] Remove accept_any_reply from parameters --- zenoh/src/query.rs | 5 +++-- zenoh/src/selector.rs | 21 +++------------------ 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 14a30e983a..47ae8faef4 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -387,8 +387,9 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { selector: self.selector.map(|mut s| { - s.parameters_mut() - .set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); + if accept == ReplyKeyExpr::Any { + s.parameters_mut().insert(_REPLY_KEY_EXPR_ANY_SEL_PARAM, ""); + } s }), ..self diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 606e2022cd..bad41b704a 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -228,23 +228,6 @@ impl Parameters<'_> { } } - #[cfg(any(feature = "unstable", test))] - pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; - - let mut anyke: Option = anyke.into(); - match anyke.take() { - Some(ak) => { - if ak { - self.0.insert(ANYKE, "") - } else { - self.0.insert(ANYKE, "false") - } - } - None => self.0.remove(ANYKE), - }; - } - #[cfg(any(feature = "unstable", test))] pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; @@ -374,6 +357,8 @@ impl<'a> From> for Selector<'a> { #[test] fn selector_accessors() { + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); for selector in [ "hello/there?_timetrick", @@ -406,7 +391,7 @@ fn 
selector_accessors() { selector.parameters_mut().extend_from_iter(hm.iter()); assert_eq!(selector.parameters().get("_filter").unwrap(), ""); - selector.parameters_mut().set_accept_any_keyexpr(true); + selector.parameters_mut().insert(ANYKE, ""); println!("Parameters end: {}", selector.parameters()); for i in selector.parameters().iter() { From f70143ce64491ef9fc9407bad04659436a9d78a8 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 12:02:43 +0200 Subject: [PATCH 244/598] Remove accept_any_keyexpr from selector and parameters --- zenoh/src/queryable.rs | 6 +++++- zenoh/src/selector.rs | 15 --------------- zenoh/src/session.rs | 5 ++++- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 3f88c6dcd8..000a84d54d 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -222,7 +222,11 @@ impl Query { } #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { - Ok(self.parameters().accept_any_keyexpr()?.unwrap_or(false)) + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + + Ok(self + .parameters() + .contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM)) } } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index bad41b704a..4a26d4ae63 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -134,11 +134,6 @@ impl<'a> Selector<'a> { pub fn time_range(&self) -> ZResult> { self.parameters().time_range() } - - #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { - self.parameters().accept_any_keyexpr() - } } /// A wrapper type to help decode zenoh selector parameters. 
@@ -227,16 +222,6 @@ impl Parameters<'_> { None => Ok(None), } } - - #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; - - match self.0.get(ANYKE) { - Some(ak) => Ok(Some(ak.parse()?)), - None => Ok(None), - } - } } impl std::fmt::Debug for Selector<'_> { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ef1d56ac32..e01cbb2364 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2189,7 +2189,10 @@ impl Primitives for Session { Some(query) => { let c = zcondfeat!( "unstable", - !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))), + !query + .selector + .parameters() + .contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM), true ); if c && !query.selector.key_expr.intersects(&key_expr) { From 9a43b8322fe1a9f2f1ad5c91555c917ebe7ae649 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 12:05:36 +0200 Subject: [PATCH 245/598] Remove time_range from selector --- zenoh-ext/src/publication_cache.rs | 4 ++-- zenoh/src/selector.rs | 23 ++++++----------------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index bbc90c0e8f..d11ef90537 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -208,7 +208,7 @@ impl<'a> PublicationCache<'a> { if !query.selector().key_expr().as_str().contains('*') { if let Some(queue) = cache.get(query.selector().key_expr().as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } @@ -222,7 +222,7 @@ impl<'a> PublicationCache<'a> { for (key_expr, queue) in cache.iter() { if 
query.selector().key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 4a26d4ae63..21be85b49e 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -120,20 +120,6 @@ impl<'a> Selector<'a> { pub fn split(self) -> (KeyExpr<'a>, Parameters<'a>) { (self.key_expr, self.parameters) } - - #[zenoh_macros::unstable] - /// Sets the time range targeted by the selector. - pub fn set_time_range>>(&mut self, time_range: T) { - self.parameters_mut().set_time_range(time_range); - } - - #[zenoh_macros::unstable] - /// Extracts the standardized `_time` argument from the selector parameters. - /// - /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. - pub fn time_range(&self) -> ZResult> { - self.parameters().time_range() - } } /// A wrapper type to help decode zenoh selector parameters. @@ -216,7 +202,7 @@ impl Parameters<'_> { /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
- fn time_range(&self) -> ZResult> { + pub fn time_range(&self) -> ZResult> { match self.0.get(TIME_RANGE_KEY) { Some(tr) => Ok(Some(tr.parse()?)), None => Ok(None), @@ -360,8 +346,11 @@ fn selector_accessors() { assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); - selector.set_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); + selector.parameters_mut().set_time_range(time_range); + assert_eq!( + selector.parameters().time_range().unwrap().unwrap(), + time_range + ); assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); From 3ffded929820f6d28d1a1fb4afbfd092a86d0899 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 12:16:01 +0200 Subject: [PATCH 246/598] Improve docs --- zenoh/src/selector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 21be85b49e..15ce36faa8 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -99,7 +99,7 @@ impl<'a> Selector<'a> { &mut self.parameters } - /// Sets the `parameters` part of this `Selector`. + /// Sets the parameters of this selector. This operation completly overwrites existing [`Parameters`]. #[inline(always)] pub fn set_parameters

(&mut self, parameters: P) where From 1bb65cc847729a9368cd50926c505738fbc00265 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:49:27 +0200 Subject: [PATCH 247/598] make receiver fields pub(crate) in Subscriber, Queryable, Scout and MatchingListener (#930) * make receiver fields pub(crate) in Subscriber, Quryable, Scout and MatchingListener * Add handler() and handler_mut() to Subscriber and Queryable * Fix conflict generic names --------- Co-authored-by: Luca Cominardi --- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 2 +- zenoh-ext/src/publication_cache.rs | 6 ++-- zenoh-ext/src/querying_subscriber.rs | 24 ++++++------- zenoh-ext/src/subscriber_ext.rs | 2 +- zenoh/src/handlers/mod.rs | 2 +- zenoh/src/liveliness.rs | 4 +-- zenoh/src/publication.rs | 2 +- zenoh/src/queryable.rs | 36 ++++++++++++++----- zenoh/src/scouting.rs | 2 +- zenoh/src/subscriber.rs | 32 ++++++++++++----- 10 files changed, 73 insertions(+), 39 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..ac3e7bacfe 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -46,7 +46,7 @@ async fn main() { let queryable = session.declare_queryable(key).res().await.unwrap(); async_std::task::spawn({ - let receiver = queryable.receiver.clone(); + let receiver = queryable.handler().clone(); async move { while let Ok(request) = receiver.recv_async().await { request.reply(key, HTML).res().await.unwrap(); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index d11ef90537..71b6e6b26b 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -160,8 +160,8 @@ impl<'a> PublicationCache<'a> { let queryable = queryable.res_sync()?; // take local ownership of stuff to be moved into task - let sub_recv = 
local_sub.receiver.clone(); - let quer_recv = queryable.receiver.clone(); + let sub_recv = local_sub.handler().clone(); + let quer_recv = queryable.handler().clone(); let pub_key_expr = key_expr.into_owned(); let resources_limit = conf.resources_limit; let history = conf.history; @@ -202,7 +202,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cach content + // on query, reply with cache content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.selector().key_expr().as_str().contains('*') { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d749a94ed9..090cc88ff0 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -614,38 +614,38 @@ where /// } /// # } /// ``` -pub struct FetchingSubscriber<'a, Receiver> { +pub struct FetchingSubscriber<'a, Handler> { subscriber: Subscriber<'a, ()>, callback: Arc, state: Arc>, - receiver: Receiver, + handler: Handler, } -impl std::ops::Deref for FetchingSubscriber<'_, Receiver> { - type Target = Receiver; +impl std::ops::Deref for FetchingSubscriber<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + &self.handler } } -impl std::ops::DerefMut for FetchingSubscriber<'_, Receiver> { +impl std::ops::DerefMut for FetchingSubscriber<'_, Handler> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver + &mut self.handler } } -impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { +impl<'a, Handler> FetchingSubscriber<'a, Handler> { fn new< KeySpace, - Handler, + InputHandler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, >( - conf: FetchingSubscriberBuilder<'a, 'a, KeySpace, Handler, Fetch, TryIntoSample>, + conf: FetchingSubscriberBuilder<'a, 'a, KeySpace, InputHandler, Fetch, TryIntoSample>, ) -> ZResult where KeySpace: Into, - Handler: IntoHandler<'static, Sample, Handler = Receiver> + Send, + InputHandler: IntoHandler<'static, Sample, 
Handler = Handler> + Send, TryIntoSample: ExtractSample + Send + Sync, { let state = Arc::new(Mutex::new(InnerState { @@ -698,7 +698,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { subscriber, callback, state, - receiver, + handler: receiver, }; // run fetch diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 7d77fac05b..4b6346bcf8 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -38,7 +38,7 @@ where { type Output = Forward, fn(Sample) -> Result>, S>; fn forward(&'a mut self, sink: S) -> Self::Output { - futures::StreamExt::forward(futures::StreamExt::map(self.receiver.stream(), Ok), sink) + futures::StreamExt::forward(futures::StreamExt::map(self.stream(), Ok), sink) } } diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/handlers/mod.rs index 627c166795..2abd5b134a 100644 --- a/zenoh/src/handlers/mod.rs +++ b/zenoh/src/handlers/mod.rs @@ -26,7 +26,7 @@ use crate::API_DATA_RECEPTION_CHANNEL_SIZE; /// An alias for `Arc`. pub type Dyn = std::sync::Arc; -/// A type that can be converted into a [`Callback`]-handler pair. +/// A type that can be converted into a [`Callback`]-Handler pair. /// /// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, /// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. 
diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index e55b0a90dc..33022debbe 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -554,7 +554,7 @@ where fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_handler(); + let (callback, handler) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -569,7 +569,7 @@ where state: sub_state, alive: true, }, - receiver, + handler, }) } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index e3d43993f3..afe61cb3c4 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -1402,7 +1402,7 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene #[zenoh_macros::unstable] pub struct MatchingListener<'a, Receiver> { pub(crate) listener: MatchingListenerInner<'a>, - pub receiver: Receiver, + pub(crate) receiver: Receiver, } #[zenoh_macros::unstable] diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 000a84d54d..755e0364af 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -32,7 +32,7 @@ use crate::{ }; use std::fmt; use std::future::Ready; -use std::ops::Deref; +use std::ops::{Deref, DerefMut}; use std::sync::Arc; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; @@ -844,12 +844,12 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// ``` #[non_exhaustive] #[derive(Debug)] -pub struct Queryable<'a, Receiver> { +pub struct Queryable<'a, Handler> { pub(crate) queryable: CallbackQueryable<'a>, - pub receiver: Receiver, + pub(crate) handler: Handler, } -impl<'a, Receiver> Queryable<'a, Receiver> { +impl<'a, Handler> Queryable<'a, Handler> { /// Returns the [`EntityGlobalId`] of this Queryable. /// /// # Examples @@ -874,6 +874,20 @@ impl<'a, Receiver> Queryable<'a, Receiver> { } } + /// Returns a reference to this queryable's handler. 
+ /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler(&self) -> &Handler { + &self.handler + } + + /// Returns a mutable reference to this queryable's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler_mut(&mut self) -> &mut Handler { + &mut self.handler + } + #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) @@ -886,11 +900,17 @@ impl<'a, T> Undeclarable<(), QueryableUndeclaration<'a>> for Queryable<'a, T> { } } -impl Deref for Queryable<'_, Receiver> { - type Target = Receiver; +impl Deref for Queryable<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + self.handler() + } +} + +impl DerefMut for Queryable<'_, Handler> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.handler_mut() } } @@ -923,7 +943,7 @@ where state: qable_state, alive: true, }, - receiver, + handler: receiver, }) } } diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index 41d0401d56..072c8dee8b 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -269,7 +269,7 @@ impl fmt::Debug for ScoutInner { #[derive(Debug)] pub struct Scout { pub(crate) scout: ScoutInner, - pub receiver: Receiver, + pub(crate) receiver: Receiver, } impl Deref for Scout { diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 47d41ebb1f..ded2a1acdc 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -394,7 +394,7 @@ where state: sub_state, alive: true, }, - receiver, + handler: receiver, }) } } @@ -440,12 +440,12 @@ where /// ``` #[non_exhaustive] #[derive(Debug)] -pub struct Subscriber<'a, Receiver> { +pub struct Subscriber<'a, Handler> { pub(crate) subscriber: SubscriberInner<'a>, - pub receiver: Receiver, + pub(crate) handler: Handler, } -impl<'a, Receiver> Subscriber<'a, Receiver> { +impl<'a, Handler> 
Subscriber<'a, Handler> { /// Returns the [`EntityGlobalId`] of this Subscriber. /// /// # Examples @@ -475,6 +475,20 @@ impl<'a, Receiver> Subscriber<'a, Receiver> { &self.subscriber.state.key_expr } + /// Returns a reference to this subscriber's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler(&self) -> &Handler { + &self.handler + } + + /// Returns a mutable reference to this subscriber's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler_mut(&mut self) -> &mut Handler { + &mut self.handler + } + /// Close a [`Subscriber`]. /// /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or @@ -506,16 +520,16 @@ impl<'a, T> Undeclarable<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> } } -impl Deref for Subscriber<'_, Receiver> { - type Target = Receiver; +impl Deref for Subscriber<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + self.handler() } } -impl DerefMut for Subscriber<'_, Receiver> { +impl DerefMut for Subscriber<'_, Handler> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver + self.handler_mut() } } From a0db071832ba0bbf2cf0ebac7e501a97785c58ee Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 14:50:13 +0200 Subject: [PATCH 248/598] compilation fixes --- Cargo.lock | 3 +-- commons/zenoh-codec/src/network/interest.rs | 4 ++-- examples/Cargo.toml | 1 - plugins/zenoh-plugin-example/src/lib.rs | 1 - .../src/replica/aligner.rs | 6 +++--- .../src/replica/storage.rs | 8 ++++---- zenoh/src/admin.rs | 4 ++-- zenoh/src/handlers/callback.rs | 2 +- zenoh/src/handlers/fifo.rs | 2 +- zenoh/src/handlers/ring.rs | 2 +- zenoh/src/net/primitives/mux.rs | 6 +++--- zenoh/src/net/routing/dispatcher/pubsub.rs | 6 +++--- zenoh/src/net/routing/dispatcher/queries.rs | 4 
++-- zenoh/src/sample/mod.rs | 2 +- zenoh/src/session.rs | 10 ---------- 15 files changed, 24 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e6a795b6b..754177e657 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2334,7 +2334,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -5113,7 +5113,6 @@ dependencies = [ "clap", "flume", "futures", - "log", "phf", "serde", "serde_cbor", diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs index 9d1e64de76..852e106f98 100644 --- a/commons/zenoh-codec/src/network/interest.rs +++ b/commons/zenoh-codec/src/network/interest.rs @@ -24,8 +24,8 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare, id, - interest::{self, Interest, InterestMode, InterestOptions}, - Mapping, + interest::{self, InterestMode, InterestOptions}, + Interest, Mapping, }, }; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 968ddcd99d..2027133a1e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -48,7 +48,6 @@ flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } json5 = { workspace = true } -log = { workspace = true } zenoh-collections = { workspace = true } tracing = { workspace = true } zenoh = { workspace = true, default-features = true } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 35ce3f6e8f..c2112e28b2 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -14,7 +14,6 @@ #![recursion_limit = "256"] use futures::select; -use log::{debug, info}; use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index a845f7065a..68387b6596 100644 
--- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -110,7 +110,7 @@ impl Aligner { .encoding(value.encoding().clone()) .timestamp(ts) .into(); - log::debug!("[ALIGNER] Adding {:?} to storage", sample); + tracing::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { tracing::error!("[ALIGNER] Error adding sample to storage: {}", e) }); @@ -343,7 +343,7 @@ impl Aligner { } Err(err) => { tracing::error!( - "[ALIGNER] Received error for query on selector {} :{}", + "[ALIGNER] Received error for query on selector {} :{:?}", selector, err ); @@ -353,7 +353,7 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {}", selector, err); + tracing::error!("[ALIGNER] Query failed on selector `{}`: {:?}", selector, err); no_err = false; } }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 610c06bea2..3c218fa85a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -180,7 +180,7 @@ impl StorageService { // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored if sample.timestamp().is_none() { - tracing::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); + tracing::error!("Sample {:?} is not timestamped. 
Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -263,7 +263,7 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - tracing::trace!("[STORAGE] Processing sample: {}", sample); + tracing::trace!("[STORAGE] Processing sample: {:?}", sample); // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -289,7 +289,7 @@ impl StorageService { && self.is_latest(&k, sample.timestamp().unwrap()).await)) { tracing::trace!( - "Sample `{}` identified as neded processing for key {}", + "Sample `{:?}` identified as neded processing for key {}", sample, k ); @@ -656,7 +656,7 @@ impl StorageService { self.process_sample(sample).await; } Err(e) => tracing::warn!( - "Storage '{}' received an error to align query: {}", + "Storage '{}' received an error to align query: {:?}", self.name, e ), diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 3c76ca468a..5cf4b68b05 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -72,7 +72,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } - Err(e) => log::debug!("Admin query error: {}", e), + Err(e) => tracing::debug!("Admin query error: {}", e), } } } @@ -89,7 +89,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } - Err(e) => log::debug!("Admin query error: {}", e), + Err(e) => tracing::debug!("Admin query error: {}", e), } } } diff --git a/zenoh/src/handlers/callback.rs b/zenoh/src/handlers/callback.rs index 21c1b0878c..4f49e7c41f 100644 --- a/zenoh/src/handlers/callback.rs +++ b/zenoh/src/handlers/callback.rs @@ -43,7 +43,7 @@ impl 
IntoHandler<'static, T> for (flume::Sender, flume::Re ( Dyn::new(move |t| { if let Err(e) = sender.send(t) { - log::error!("{}", e) + tracing::error!("{}", e) } }), receiver, diff --git a/zenoh/src/handlers/fifo.rs b/zenoh/src/handlers/fifo.rs index 0fa3ab304c..f0ae1a5257 100644 --- a/zenoh/src/handlers/fifo.rs +++ b/zenoh/src/handlers/fifo.rs @@ -52,7 +52,7 @@ impl IntoHandler<'static, T> ( Dyn::new(move |t| { if let Err(e) = sender.send(t) { - log::error!("{}", e) + tracing::error!("{}", e) } }), receiver, diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/handlers/ring.rs index 341a3efadd..23347f249e 100644 --- a/zenoh/src/handlers/ring.rs +++ b/zenoh/src/handlers/ring.rs @@ -108,7 +108,7 @@ impl IntoHandler<'static, T> for RingChannel { drop(g); let _ = sender.try_send(()); } - Err(e) => log::error!("{}", e), + Err(e) => tracing::error!("{}", e), }), receiver, ) diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 365f390461..8589fab518 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -51,7 +51,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(msg); } else if let Some(face) = self.face.get() { let Some(face) = face.upgrade() else { - log::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); + tracing::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); return; }; let ctx = RoutingContext::new_out(msg, face.clone()); @@ -65,7 +65,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::debug!("Uninitialized multiplexer. Interest not sent: {:?}", msg); + tracing::debug!("Uninitialized multiplexer. 
Interest not sent: {:?}", msg); } } @@ -365,7 +365,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 7cefd3f455..fe2274ed64 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -43,7 +43,7 @@ pub(crate) fn declare_subscription( .cloned() { Some(mut prefix) => { - log::debug!( + tracing::debug!( "{} Declare subscriber {} ({}{})", face, id, @@ -114,7 +114,7 @@ pub(crate) fn undeclare_subscription( Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { Some(res) => Some(res), None => { - log::error!( + tracing::error!( "{} Undeclare unknown subscriber {}{}!", face, prefix.expr(), @@ -124,7 +124,7 @@ pub(crate) fn undeclare_subscription( } }, None => { - log::error!( + tracing::error!( "{} Undeclare subscriber with unknown scope {}", face, expr.scope diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 202fed9681..cd17f1339f 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -125,7 +125,7 @@ pub(crate) fn undeclare_queryable( Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { Some(res) => Some(res), None => { - log::error!( + tracing::error!( "{} Undeclare unknown queryable {}{}!", face, prefix.expr(), @@ -135,7 +135,7 @@ pub(crate) fn undeclare_queryable( } }, None => { - log::error!( + tracing::error!( "{} Undeclare queryable with unknown scope {}", face, expr.scope diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 7bb3fe9cde..b8fc62be57 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -435,7 +435,7 @@ impl QoS { match Priority::try_from(self.inner.get_priority()) { Ok(p) => 
p, Err(e) => { - log::trace!( + tracing::trace!( "Failed to convert priority: {}; replacing with default value", e.to_string() ); diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 2658d39d42..875c72a395 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2158,16 +2158,6 @@ impl Primitives for Session { fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Ack(_) => { - tracing::warn!( - "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." - ) - } - ResponseBody::Put(_) => { - tracing::warn!( - "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." - ) - } ResponseBody::Err(e) => { let mut state = zwrite!(self.state); match state.queries.get_mut(&msg.rid) { From 1c3eb8eeb0b32b111d8cd7e81eed22e903433845 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 15:54:32 +0200 Subject: [PATCH 249/598] compile fix --- io/zenoh-transport/tests/unicast_time.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 75d3ae1d98..0c35dc1f79 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -230,7 +230,7 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -239,7 +239,7 @@ async fn time_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 
13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -248,7 +248,7 @@ async fn time_tcp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -257,7 +257,7 @@ async fn time_udp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -266,7 +266,7 @@ async fn time_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -275,7 +275,7 @@ async fn time_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -284,7 +284,7 @@ async fn time_ws_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -293,7 +293,7 @@ async fn time_unixpipe_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] 
async fn time_unixpipe_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); @@ -304,7 +304,7 @@ async fn time_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -318,8 +318,7 @@ async fn time_unix_only() { #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -515,7 +514,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_vsock_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); time_lowlatency_transport(&endpoint).await; } From 2be12212e1b5f023f0c0b1c52190983ec10ce5a4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 19:24:05 +0200 Subject: [PATCH 250/598] compilation fixes --- Cargo.lock | 2 ++ plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 1 - zenoh/src/api/builders/publication.rs | 2 +- zenoh/src/api/builders/sample.rs | 2 -- zenoh/src/api/publication.rs | 2 +- zenoh/src/api/queryable.rs | 9 +++++++-- zenoh/src/api/selector.rs | 5 ++++- zenoh/src/api/session.rs | 19 +++++++++++-------- zenoh/src/lib.rs | 3 ++- 11 files changed, 30 insertions(+), 19 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index bcd0301bc3..9848e366be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5122,6 +5122,7 @@ dependencies = [ "tokio", "tracing", "zenoh", + "zenoh-util", ] [[package]] @@ -5471,6 +5472,7 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", + "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 5d3c0f5c79..ed48580920 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -37,7 +37,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; -use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; +use zenoh::selector::{Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 2733850036..ef6e00d63f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,12 +24,12 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use tokio::task::JoinHandle; use zenoh::core::AsyncResolve; use zenoh::core::Error as ZError; use zenoh::core::Result as ZResult; use zenoh::internal::bail; use zenoh::internal::Condition; +use zenoh::internal::TaskController; use zenoh::key_expr::keyexpr; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index afd567a8c9..b6a380d766 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -22,7 +22,6 @@ use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::queryable::{Query, Queryable}; use zenoh::runtime::ZRuntime; use zenoh::sample::{Locality, Sample}; -use 
zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; use zenoh::{core::Result as ZResult, internal::bail}; diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 9a95317488..c710d0ad79 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -376,7 +376,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { is_express: self.is_express, destination: self.destination, }; - log::trace!("publish({:?})", publisher.key_expr); + tracing::trace!("publish({:?})", publisher.key_expr); Ok(publisher) } } diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 55a028f687..3b1bd642cd 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -18,8 +18,6 @@ use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; use crate::api::publication::Priority; -#[cfg(feature = "unstable")] -use crate::api::sample::Attachment; use crate::api::sample::QoS; use crate::api::sample::QoSBuilder; use crate::api::sample::Sample; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 1f90ce422b..ed8422a75e 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -540,7 +540,7 @@ impl Publisher<'_> { #[cfg(feature = "unstable")] source_info: SourceInfo, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { - log::trace!("write({:?}, [...])", &self.key_expr); + tracing::trace!("write({:?}, [...])", &self.key_expr); let primitives = zread!(self.session.state) .primitives .as_ref() diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index b4dd7b51ac..942022a510 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -25,7 +25,12 @@ use super::{ Id, }; use crate::net::primitives::Primitives; -use std::{fmt, future::Ready, ops::Deref, 
sync::Arc}; +use std::{ + fmt, + future::Ready, + ops::{Deref, DerefMut}, + sync::Arc, +}; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ @@ -213,7 +218,7 @@ impl Query { } #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; Ok(self .parameters() diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 0e6881f526..1e2218d46d 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -20,7 +20,10 @@ use std::{ ops::{Deref, DerefMut}, str::FromStr, }; -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::core::{ + key_expr::{keyexpr, OwnedKeyExpr}, + Properties, +}; #[cfg(feature = "unstable")] use zenoh_result::ZResult; #[cfg(feature = "unstable")] diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 337c014597..412b4a2f6d 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -25,13 +25,15 @@ use super::{ query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, - selector::{Parameters, Selector, TIME_RANGE_KEY}, + selector::{Selector, TIME_RANGE_KEY}, subscriber::{SubscriberBuilder, SubscriberState}, value::Value, Id, }; -use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; -use log::{error, trace, warn}; +use crate::{ + api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, + net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}, +}; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -44,6 +46,7 @@ use std::{ }, time::Duration, }; +use tracing::{error, trace, warn}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; @@ 
-79,11 +82,11 @@ use zenoh_util::core::AsyncResolve; #[cfg(feature = "unstable")] use super::{ - liveliness::{Liveliness, LivelinessTokenState}, - publication::Publisher, - publication::{MatchingListenerState, MatchingStatus}, - sample::{Attachment, SourceInfo}, - }; + liveliness::{Liveliness, LivelinessTokenState}, + publication::Publisher, + publication::{MatchingListenerState, MatchingStatus}, + sample::{Attachment, SourceInfo}, +}; zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 15896bb04f..8f7645a965 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -239,10 +239,10 @@ pub mod payload { /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - pub use crate::api::selector::Parameter; pub use crate::api::selector::Parameters; pub use crate::api::selector::Selector; pub use crate::api::selector::TIME_RANGE_KEY; + pub use zenoh_protocol::core::Properties; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; } @@ -351,6 +351,7 @@ pub mod internal { pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; + pub use zenoh_task::TaskController; pub use zenoh_task::TerminatableTask; pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; From 334946c01b1bed73a4dfcf3377b31d4875de2448 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 19:28:16 +0200 Subject: [PATCH 251/598] compilation fix --- zenoh/src/api/selector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 1e2218d46d..9891726287 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -329,7 +329,7 @@ impl<'a> From> for Selector<'a> { #[test] fn selector_accessors() { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + use 
crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); for selector in [ From f6aebd437731361a107af3f0c45044de75d121f5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 19:32:45 +0200 Subject: [PATCH 252/598] cargo fmt --- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 6 +++++- zenoh/src/api/encoding.rs | 1 - zenoh/src/api/scouting.rs | 1 - zenoh/tests/events.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 7 files changed, 9 insertions(+), 7 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 2b4c3b8b95..3a14525cd7 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -21,8 +21,8 @@ use std::sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, Mutex, }; -use zenoh::key_expr::{keyexpr, KeyExpr}; use tracing::{debug, info}; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::sample::Sample; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 5322a819d3..92c743d512 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -354,7 +354,11 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {:?}", selector, err); + tracing::error!( + "[ALIGNER] Query failed on selector `{}`: {:?}", + selector, + err + ); no_err = false; } }; diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index aba01e01b4..3283ec1a84 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -850,4 +850,3 @@ impl EncodingMapping for SharedMemoryBuf { } pub struct EncodingBuilder(Encoding); - diff --git 
a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 4e0ad50b0b..2b0022f242 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -376,4 +376,3 @@ where handler: DefaultHandler::default(), } } - diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index b2c6ef862f..3f18b027a8 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 9c5ee527d5..dea8870905 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,9 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh::core::Result; use zenoh::internal::{bail, ztimeout}; use zenoh::prelude::r#async::*; -use zenoh::core::Result; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 4e56fbfbfb..6867ac6a0b 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -15,10 +15,10 @@ use clap::Parser; use futures::future; use git_version::git_version; use std::collections::HashSet; -use zenoh::config::EndPoint; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; +use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; use zenoh::core::Result; use zenoh::plugins::PluginsManager; From cc7b3529ab94696f88e15fe0d87d2ddb6c0ea2de Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:06:58 +0200 Subject: [PATCH 253/598] Merge main --- zenoh-ext/src/group.rs | 1 - zenoh/tests/acl.rs | 44 
+++++++++++++++++++----------------------- 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index a60635becf..0926c63d33 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,7 +24,6 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use tokio::task::JoinHandle; use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index fec29515db..c099fa021e 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -97,7 +97,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async() .await @@ -142,7 +142,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async()) .unwrap(); @@ -203,7 +203,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async()) .unwrap(); @@ -263,7 +263,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async()) .unwrap(); @@ -307,10 +307,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + 
ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -320,12 +319,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; @@ -363,10 +362,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -376,12 +374,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; @@ -434,10 +432,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -447,12 +444,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) 
= ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; @@ -504,10 +501,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -517,12 +513,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; From 8a09bf90f42e543d04e2c8a6f03f5db2169baf41 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:11:46 +0200 Subject: [PATCH 254/598] cargo fmt --all --- plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 68387b6596..c1dc1196bf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -353,7 +353,11 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {:?}", selector, err); + tracing::error!( + "[ALIGNER] Query failed on 
selector `{}`: {:?}", + selector, + err + ); no_err = false; } }; From a675130d99d5da5dcbd54b915f45e5d35c581322 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:14:10 +0200 Subject: [PATCH 255/598] Use tracing in examples --- examples/examples/z_sub_shm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 630876f287..f89df5ee60 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -20,7 +20,7 @@ use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, key_expr) = parse_args(); From 72bcead175f3c874736bcaaa11aa686cb313dc66 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:21:23 +0200 Subject: [PATCH 256/598] Rename Payload to ZBytes --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 4 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/mod.rs | 2 +- .../tests/operations.rs | 2 +- .../tests/wildcard.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/admin.rs | 6 +- zenoh/src/{payload.rs => bytes.rs} | 498 +++++++++--------- zenoh/src/encoding.rs | 4 +- zenoh/src/lib.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 12 +- zenoh/src/prelude.rs | 4 +- zenoh/src/publication.rs | 18 +- zenoh/src/query.rs | 8 +- zenoh/src/queryable.rs | 16 +- zenoh/src/sample/builder.rs | 18 +- zenoh/src/sample/mod.rs | 18 +- zenoh/src/session.rs | 4 +- zenoh/src/value.rs | 12 +- 22 files changed, 320 insertions(+), 320 deletions(-) rename zenoh/src/{payload.rs => bytes.rs} (73%) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 1af30bfb10..f07c1eac66 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -39,7 +39,7 @@ fn main() { .res() .unwrap(); - let data: Payload = 
(0usize..size) + let data: ZBytes = (0usize..size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 41c5179b63..945a871094 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -30,7 +30,7 @@ fn main() { let payload_size = args.payload_size; - let data: Payload = (0..payload_size) + let data: ZBytes = (0..payload_size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c9397aa4a9..82f02356d4 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,7 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; @@ -61,7 +61,7 @@ pub fn base64_encode(data: &[u8]) -> String { general_purpose::STANDARD.encode(data) } -fn payload_to_json(payload: &Payload, encoding: &Encoding) -> serde_json::Value { +fn payload_to_json(payload: &ZBytes, encoding: &Encoding) -> serde_json::Value { match payload.is_empty() { // If the value is empty return a JSON null true => serde_json::Value::Null, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index f3f8ade729..8ac9d18f88 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,7 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use 
zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index c1dc1196bf..3392bf28e8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,8 +18,8 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::bytes::StringOrBase64; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; -use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 525d446f3a..467751b04d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,7 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index c82459cdcc..a32648319e 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,7 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index d778eadde4..6698f2a5b9 100644 --- 
a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,7 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 0926c63d33..83b3c7b199 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,7 +24,7 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::payload::PayloadReader; +use zenoh::bytes::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 5cf4b68b05..16de7dd0a5 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -17,7 +17,7 @@ use crate::{ prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Session, ZResult, + Session, ZBytes, ZResult, }; use std::{ collections::hash_map::DefaultHasher, @@ -68,7 +68,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { - match Payload::try_from(value) { + match ZBytes::try_from(value) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } @@ -85,7 +85,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(link) { - match Payload::try_from(value) { + match ZBytes::try_from(value) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } diff --git 
a/zenoh/src/payload.rs b/zenoh/src/bytes.rs similarity index 73% rename from zenoh/src/payload.rs rename to zenoh/src/bytes.rs index 5280c7af3c..91bae8f517 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/bytes.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -//! Payload primitives. +//! ZBytes primitives. use crate::buffers::ZBuf; use std::str::Utf8Error; use std::{ @@ -45,21 +45,21 @@ pub trait Deserialize<'a, T> { type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a Payload) -> Result; + fn deserialize(self, t: &'a ZBytes) -> Result; } /// A payload contains the serialized bytes of user data. #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct Payload(ZBuf); +pub struct ZBytes(ZBuf); -impl Payload { +impl ZBytes { /// Create an empty payload. pub const fn empty() -> Self { Self(ZBuf::empty()) } - /// Create a [`Payload`] from any type `T` that implements [`Into`]. + /// Create a [`ZBytes`] from any type `T` that implements [`Into`]. pub fn new(t: T) -> Self where T: Into, @@ -77,34 +77,34 @@ impl Payload { self.0.len() } - /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn reader(&self) -> PayloadReader<'_> { - PayloadReader(self.0.reader()) + /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. + pub fn reader(&self) -> ZBytesReader<'_> { + ZBytesReader(self.0.reader()) } - /// Build a [`Payload`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. + /// Build a [`ZBytes`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. pub fn from_reader(mut reader: R) -> Result where R: std::io::Read, { let mut buf: Vec = vec![]; reader.read_to_end(&mut buf)?; - Ok(Payload::new(buf)) + Ok(ZBytes::new(buf)) } - /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. 
- pub fn writer(&mut self) -> PayloadWriter<'_> { - PayloadWriter(self.0.writer()) + /// Get a [`ZBytesWriter`] implementing [`std::io::Write`] trait. + pub fn writer(&mut self) -> ZBytesWriter<'_> { + ZBytesWriter(self.0.writer()) } - /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> PayloadIterator<'_, T> + /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. + pub fn iter(&self) -> ZBytesIterator<'_, T> where - T: for<'b> TryFrom<&'b Payload>, + T: for<'b> TryFrom<&'b ZBytes>, for<'b> ZSerde: Deserialize<'b, T>, for<'b> >::Error: Debug, { - PayloadIterator { + ZBytesIterator { reader: self.0.reader(), _t: PhantomData::, } @@ -113,16 +113,16 @@ impl Payload { /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust - /// use zenoh::payload::Payload; + /// use zenoh::payload::ZBytes; /// /// let start = String::from("abc"); - /// let payload = Payload::serialize(start.clone()); + /// let payload = ZBytes::serialize(start.clone()); /// let end: String = payload.deserialize().unwrap(); /// assert_eq!(start, end); /// ``` pub fn serialize(t: T) -> Self where - ZSerde: Serialize, + ZSerde: Serialize, { ZSerde.serialize(t) } @@ -148,29 +148,29 @@ impl Payload { } } -/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +/// A reader that implements [`std::io::Read`] trait to read from a [`ZBytes`]. 
#[repr(transparent)] #[derive(Debug)] -pub struct PayloadReader<'a>(ZBufReader<'a>); +pub struct ZBytesReader<'a>(ZBufReader<'a>); -impl std::io::Read for PayloadReader<'_> { +impl std::io::Read for ZBytesReader<'_> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { std::io::Read::read(&mut self.0, buf) } } -impl std::io::Seek for PayloadReader<'_> { +impl std::io::Seek for ZBytesReader<'_> { fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { std::io::Seek::seek(&mut self.0, pos) } } -/// A writer that implements [`std::io::Write`] trait to write into a [`Payload`]. +/// A writer that implements [`std::io::Write`] trait to write into a [`ZBytes`]. #[repr(transparent)] #[derive(Debug)] -pub struct PayloadWriter<'a>(ZBufWriter<'a>); +pub struct ZBytesWriter<'a>(ZBufWriter<'a>); -impl std::io::Write for PayloadWriter<'_> { +impl std::io::Write for ZBytesWriter<'_> { fn write(&mut self, buf: &[u8]) -> std::io::Result { std::io::Write::write(&mut self.0, buf) } @@ -180,11 +180,11 @@ impl std::io::Write for PayloadWriter<'_> { } } -/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. -/// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. +/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`ZBytes`]. +/// Note that [`ZBytes`] contains a serialized version of `T` and iterating over a [`ZBytes`] performs lazy deserialization. 
#[repr(transparent)] #[derive(Debug)] -pub struct PayloadIterator<'a, T> +pub struct ZBytesIterator<'a, T> where ZSerde: Deserialize<'a, T>, { @@ -192,7 +192,7 @@ where _t: PhantomData, } -impl Iterator for PayloadIterator<'_, T> +impl Iterator for ZBytesIterator<'_, T> where for<'a> ZSerde: Deserialize<'a, T>, for<'a> >::Error: Debug, @@ -203,16 +203,16 @@ where let codec = Zenoh080::new(); let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; - let kpld = Payload::new(kbuf); + let kpld = ZBytes::new(kbuf); let t = ZSerde.deserialize(&kpld).ok()?; Some(t) } } -impl FromIterator for Payload +impl FromIterator for ZBytes where - ZSerde: Serialize, + ZSerde: Serialize, { fn from_iter>(iter: T) -> Self { let codec = Zenoh080::new(); @@ -228,27 +228,27 @@ where } } - Payload::new(buffer) + ZBytes::new(buffer) } } -/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. +/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. 
#[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct OptionPayload(Option); +pub struct OptionZBytes(Option); -impl From for OptionPayload +impl From for OptionZBytes where - T: Into, + T: Into, { fn from(value: T) -> Self { Self(Some(value.into())) } } -impl From> for OptionPayload +impl From> for OptionZBytes where - T: Into, + T: Into, { fn from(mut value: Option) -> Self { match value.take() { @@ -258,9 +258,9 @@ where } } -impl From<&Option> for OptionPayload +impl From<&Option> for OptionZBytes where - for<'a> &'a T: Into, + for<'a> &'a T: Into, { fn from(value: &Option) -> Self { match value.as_ref() { @@ -270,8 +270,8 @@ where } } -impl From for Option { - fn from(value: OptionPayload) -> Self { +impl From for Option { + fn from(value: OptionZBytes) -> Self { value.0 } } @@ -286,28 +286,28 @@ pub struct ZDeserializeError; // ZBuf impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: ZBuf) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From for Payload { +impl From for ZBytes { fn from(t: ZBuf) -> Self { ZSerde.serialize(t) } } impl Serialize<&ZBuf> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &ZBuf) -> Self::Output { - Payload::new(t.clone()) + ZBytes::new(t.clone()) } } -impl From<&ZBuf> for Payload { +impl From<&ZBuf> for ZBytes { fn from(t: &ZBuf) -> Self { ZSerde.serialize(t) } @@ -316,47 +316,47 @@ impl From<&ZBuf> for Payload { impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { Ok(v.0.clone()) } } -impl From for ZBuf { - fn from(value: Payload) -> Self { +impl From for ZBuf { + fn from(value: ZBytes) -> Self { value.0 } } -impl From<&Payload> for ZBuf { - fn from(value: &Payload) -> Self { +impl From<&ZBytes> for ZBuf { + fn from(value: &ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // ZSlice impl 
Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: ZSlice) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From for Payload { +impl From for ZBytes { fn from(t: ZSlice) -> Self { ZSerde.serialize(t) } } impl Serialize<&ZSlice> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &ZSlice) -> Self::Output { - Payload::new(t.clone()) + ZBytes::new(t.clone()) } } -impl From<&ZSlice> for Payload { +impl From<&ZSlice> for ZBytes { fn from(t: &ZSlice) -> Self { ZSerde.serialize(t) } @@ -365,47 +365,47 @@ impl From<&ZSlice> for Payload { impl Deserialize<'_, ZSlice> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { Ok(v.0.to_zslice()) } } -impl From for ZSlice { - fn from(value: Payload) -> Self { +impl From for ZSlice { + fn from(value: ZBytes) -> Self { ZBuf::from(value).to_zslice() } } -impl From<&Payload> for ZSlice { - fn from(value: &Payload) -> Self { +impl From<&ZBytes> for ZSlice { + fn from(value: &ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // [u8; N] impl Serialize<[u8; N]> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: [u8; N]) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From<[u8; N]> for Payload { +impl From<[u8; N]> for ZBytes { fn from(t: [u8; N]) -> Self { ZSerde.serialize(t) } } impl Serialize<&[u8; N]> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &[u8; N]) -> Self::Output { - Payload::new(*t) + ZBytes::new(*t) } } -impl From<&[u8; N]> for Payload { +impl From<&[u8; N]> for ZBytes { fn from(t: &[u8; N]) -> Self { ZSerde.serialize(t) } @@ -414,7 +414,7 @@ impl From<&[u8; N]> for Payload { impl Deserialize<'_, [u8; N]> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { + fn deserialize(self, v: &ZBytes) -> 
Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -427,46 +427,46 @@ impl Deserialize<'_, [u8; N]> for ZSerde { } } -impl TryFrom for [u8; N] { +impl TryFrom for [u8; N] { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for [u8; N] { +impl TryFrom<&ZBytes> for [u8; N] { type Error = ZDeserializeError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // Vec impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Vec) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Vec) -> Self { ZSerde.serialize(t) } } impl Serialize<&Vec> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &Vec) -> Self::Output { - Payload::new(t.clone()) + ZBytes::new(t.clone()) } } -impl From<&Vec> for Payload { +impl From<&Vec> for ZBytes { fn from(t: &Vec) -> Self { ZSerde.serialize(t) } @@ -475,33 +475,33 @@ impl From<&Vec> for Payload { impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result, Self::Error> { + fn deserialize(self, v: &ZBytes) -> Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } -impl From for Vec { - fn from(value: Payload) -> Self { +impl From for Vec { + fn from(value: ZBytes) -> Self { ZSerde.deserialize(&value).unwrap_infallible() } } -impl From<&Payload> for Vec { - fn from(value: &Payload) -> Self { +impl From<&ZBytes> for Vec { + fn from(value: &ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // &[u8] impl Serialize<&[u8]> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &[u8]) -> Self::Output { - Payload::new(t.to_vec()) + ZBytes::new(t.to_vec()) } } -impl From<&[u8]> for Payload { +impl From<&[u8]> for 
ZBytes { fn from(t: &[u8]) -> Self { ZSerde.serialize(t) } @@ -509,28 +509,28 @@ impl From<&[u8]> for Payload { // Cow<[u8]> impl<'a> Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { - Payload::new(t.to_vec()) + ZBytes::new(t.to_vec()) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Cow<'_, [u8]>) -> Self { ZSerde.serialize(t) } } impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { - Payload::new(t.to_vec()) + ZBytes::new(t.to_vec()) } } -impl From<&Cow<'_, [u8]>> for Payload { +impl From<&Cow<'_, [u8]>> for ZBytes { fn from(t: &Cow<'_, [u8]>) -> Self { ZSerde.serialize(t) } @@ -539,13 +539,13 @@ impl From<&Cow<'_, [u8]>> for Payload { impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { Ok(v.0.contiguous()) } } -impl From for Cow<'static, [u8]> { - fn from(v: Payload) -> Self { +impl From for Cow<'static, [u8]> { + fn from(v: ZBytes) -> Self { match v.0.contiguous() { Cow::Borrowed(s) => Cow::Owned(s.to_vec()), Cow::Owned(s) => Cow::Owned(s), @@ -553,36 +553,36 @@ impl From for Cow<'static, [u8]> { } } -impl<'a> From<&'a Payload> for Cow<'a, [u8]> { - fn from(value: &'a Payload) -> Self { +impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // String impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: String) -> Self::Output { - Payload::new(s.into_bytes()) + ZBytes::new(s.into_bytes()) } } -impl From for Payload { +impl From for ZBytes { fn from(t: String) -> Self { ZSerde.serialize(t) } } impl Serialize<&String> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn 
serialize(self, s: &String) -> Self::Output { - Payload::new(s.clone().into_bytes()) + ZBytes::new(s.clone().into_bytes()) } } -impl From<&String> for Payload { +impl From<&String> for ZBytes { fn from(t: &String) -> Self { ZSerde.serialize(t) } @@ -591,66 +591,66 @@ impl From<&String> for Payload { impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } } -impl TryFrom for String { +impl TryFrom for String { type Error = FromUtf8Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for String { +impl TryFrom<&ZBytes> for String { type Error = FromUtf8Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // &str impl Serialize<&str> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: &str) -> Self::Output { Self.serialize(s.to_string()) } } -impl From<&str> for Payload { +impl From<&str> for ZBytes { fn from(t: &str) -> Self { ZSerde.serialize(t) } } impl<'a> Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: Cow<'a, str>) -> Self::Output { Self.serialize(s.to_string()) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Cow<'_, str>) -> Self { ZSerde.serialize(t) } } impl<'a> Serialize<&Cow<'a, str>> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: &Cow<'a, str>) -> Self::Output { Self.serialize(s.to_string()) } } -impl From<&Cow<'_, str>> for Payload { +impl From<&Cow<'_, str>> for ZBytes { fn from(t: &Cow<'_, str>) -> Self { ZSerde.serialize(t) } @@ -659,15 +659,15 @@ impl From<&Cow<'_, str>> for Payload { impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = Utf8Error; - fn 
deserialize(self, v: &'a Payload) -> Result, Self::Error> { + fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { Cow::try_from(v) } } -impl TryFrom for Cow<'static, str> { +impl TryFrom for Cow<'static, str> { type Error = Utf8Error; - fn try_from(v: Payload) -> Result { + fn try_from(v: ZBytes) -> Result { let v: Cow<'static, [u8]> = Cow::from(v); let _ = core::str::from_utf8(v.as_ref())?; // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 @@ -676,10 +676,10 @@ impl TryFrom for Cow<'static, str> { } } -impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { +impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { type Error = Utf8Error; - fn try_from(v: &'a Payload) -> Result { + fn try_from(v: &'a ZBytes) -> Result { let v: Cow<'a, [u8]> = Cow::from(v); let _ = core::str::from_utf8(v.as_ref())?; // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 @@ -692,7 +692,7 @@ impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { macro_rules! impl_int { ($t:ty) => { impl Serialize<$t> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: $t) -> Self::Output { let bs = t.to_le_bytes(); @@ -704,25 +704,25 @@ macro_rules! impl_int { // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 // - end is a valid end index because is bounded between 0 and bs.len() - Payload::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) + ZBytes::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) } } - impl From<$t> for Payload { + impl From<$t> for ZBytes { fn from(t: $t) -> Self { ZSerde.serialize(t) } } impl Serialize<&$t> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &$t) -> Self::Output { Self.serialize(*t) } } - impl From<&$t> for Payload { + impl From<&$t> for ZBytes { fn from(t: &$t) -> Self { ZSerde.serialize(t) } @@ -731,7 +731,7 @@ macro_rules! 
impl_int { impl<'a> Deserialize<'a, $t> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { + fn deserialize(self, v: &ZBytes) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -746,18 +746,18 @@ macro_rules! impl_int { } } - impl TryFrom for $t { + impl TryFrom for $t { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } - impl TryFrom<&Payload> for $t { + impl TryFrom<&ZBytes> for $t { type Error = ZDeserializeError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -784,30 +784,30 @@ impl_int!(f64); // Zenoh bool impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: bool) -> Self::Output { // SAFETY: casting a bool into an integer is well-defined behaviour. // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - Payload::new(ZBuf::from((t as u8).to_le_bytes())) + ZBytes::new(ZBuf::from((t as u8).to_le_bytes())) } } -impl From for Payload { +impl From for ZBytes { fn from(t: bool) -> Self { ZSerde.serialize(t) } } impl Serialize<&bool> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &bool) -> Self::Output { ZSerde.serialize(*t) } } -impl From<&bool> for Payload { +impl From<&bool> for ZBytes { fn from(t: &bool) -> Self { ZSerde.serialize(t) } @@ -816,7 +816,7 @@ impl From<&bool> for Payload { impl Deserialize<'_, bool> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -826,18 +826,18 @@ impl Deserialize<'_, bool> for ZSerde { } } -impl TryFrom for bool { +impl TryFrom for bool { type Error = ZDeserializeError; - fn try_from(value: Payload) -> 
Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for bool { +impl TryFrom<&ZBytes> for bool { type Error = ZDeserializeError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -845,28 +845,28 @@ impl TryFrom<&Payload> for bool { // - Zenoh advanced types encoders/decoders // Properties impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Properties<'_>) -> Self::Output { Self.serialize(t.as_str()) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Properties<'_>) -> Self { ZSerde.serialize(t) } } impl Serialize<&Properties<'_>> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &Properties<'_>) -> Self::Output { Self.serialize(t.as_str()) } } -impl<'s> From<&'s Properties<'s>> for Payload { +impl<'s> From<&'s Properties<'s>> for ZBytes { fn from(t: &'s Properties<'s>) -> Self { ZSerde.serialize(t) } @@ -875,7 +875,7 @@ impl<'s> From<&'s Properties<'s>> for Payload { impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &'s Payload) -> Result, Self::Error> { + fn deserialize(self, v: &'s ZBytes) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; @@ -883,33 +883,33 @@ impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { } } -impl TryFrom for Properties<'static> { +impl TryFrom for Properties<'static> { type Error = ZDeserializeError; - fn try_from(v: Payload) -> Result { + fn try_from(v: ZBytes) -> Result { let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; Ok(Properties::from(s.into_owned())) } } -impl<'s> TryFrom<&'s Payload> for Properties<'s> { +impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { type Error = ZDeserializeError; - fn try_from(value: &'s Payload) -> Result { + fn try_from(value: &'s ZBytes) -> Result { ZSerde.deserialize(value) } } 
// JSON impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_json::Value) -> Self::Output { ZSerde.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_json::Error; fn try_from(value: serde_json::Value) -> Result { @@ -918,16 +918,16 @@ impl TryFrom for Payload { } impl Serialize<&serde_json::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_json::to_writer(payload.writer(), t)?; Ok(payload) } } -impl TryFrom<&serde_json::Value> for Payload { +impl TryFrom<&serde_json::Value> for ZBytes { type Error = serde_json::Error; fn try_from(value: &serde_json::Value) -> Result { @@ -938,37 +938,37 @@ impl TryFrom<&serde_json::Value> for Payload { impl Deserialize<'_, serde_json::Value> for ZSerde { type Error = serde_json::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_json::from_reader(v.reader()) } } -impl TryFrom for serde_json::Value { +impl TryFrom for serde_json::Value { type Error = serde_json::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_json::Value { +impl TryFrom<&ZBytes> for serde_json::Value { type Error = serde_json::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // Yaml impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_yaml::Value) -> Self::Output { Self.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_yaml::Error; fn try_from(value: serde_yaml::Value) -> Result { @@ -977,16 +977,16 @@ impl TryFrom for Payload { } impl Serialize<&serde_yaml::Value> for ZSerde { - type Output = 
Result; + type Output = Result; fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_yaml::to_writer(payload.writer(), t)?; Ok(payload) } } -impl TryFrom<&serde_yaml::Value> for Payload { +impl TryFrom<&serde_yaml::Value> for ZBytes { type Error = serde_yaml::Error; fn try_from(value: &serde_yaml::Value) -> Result { @@ -997,37 +997,37 @@ impl TryFrom<&serde_yaml::Value> for Payload { impl Deserialize<'_, serde_yaml::Value> for ZSerde { type Error = serde_yaml::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_yaml::from_reader(v.reader()) } } -impl TryFrom for serde_yaml::Value { +impl TryFrom for serde_yaml::Value { type Error = serde_yaml::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_yaml::Value { +impl TryFrom<&ZBytes> for serde_yaml::Value { type Error = serde_yaml::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // CBOR impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_cbor::Value) -> Self::Output { Self.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_cbor::Error; fn try_from(value: serde_cbor::Value) -> Result { @@ -1036,16 +1036,16 @@ impl TryFrom for Payload { } impl Serialize<&serde_cbor::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_cbor::to_writer(payload.0.writer(), t)?; Ok(payload) } } -impl TryFrom<&serde_cbor::Value> for Payload { +impl TryFrom<&serde_cbor::Value> for ZBytes { type Error = serde_cbor::Error; fn try_from(value: &serde_cbor::Value) -> Result { @@ -1056,37 
+1056,37 @@ impl TryFrom<&serde_cbor::Value> for Payload { impl Deserialize<'_, serde_cbor::Value> for ZSerde { type Error = serde_cbor::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_cbor::from_reader(v.reader()) } } -impl TryFrom for serde_cbor::Value { +impl TryFrom for serde_cbor::Value { type Error = serde_cbor::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_cbor::Value { +impl TryFrom<&ZBytes> for serde_cbor::Value { type Error = serde_cbor::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // Pickle impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_pickle::Value) -> Self::Output { Self.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_pickle::Error; fn try_from(value: serde_pickle::Value) -> Result { @@ -1095,10 +1095,10 @@ impl TryFrom for Payload { } impl Serialize<&serde_pickle::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_pickle::value_to_writer( &mut payload.0.writer(), t, @@ -1108,7 +1108,7 @@ impl Serialize<&serde_pickle::Value> for ZSerde { } } -impl TryFrom<&serde_pickle::Value> for Payload { +impl TryFrom<&serde_pickle::Value> for ZBytes { type Error = serde_pickle::Error; fn try_from(value: &serde_pickle::Value) -> Result { @@ -1119,23 +1119,23 @@ impl TryFrom<&serde_pickle::Value> for Payload { impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) 
} } -impl TryFrom for serde_pickle::Value { +impl TryFrom for serde_pickle::Value { type Error = serde_pickle::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_pickle::Value { +impl TryFrom<&ZBytes> for serde_pickle::Value { type Error = serde_pickle::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -1143,14 +1143,14 @@ impl TryFrom<&Payload> for serde_pickle::Value { // Shared memory conversion #[cfg(feature = "shared-memory")] impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Arc) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } #[cfg(feature = "shared-memory")] -impl From> for Payload { +impl From> for ZBytes { fn from(t: Arc) -> Self { ZSerde.serialize(t) } @@ -1158,7 +1158,7 @@ impl From> for Payload { #[cfg(feature = "shared-memory")] impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Box) -> Self::Output { let smb: Arc = t.into(); @@ -1167,7 +1167,7 @@ impl Serialize> for ZSerde { } #[cfg(feature = "shared-memory")] -impl From> for Payload { +impl From> for ZBytes { fn from(t: Box) -> Self { ZSerde.serialize(t) } @@ -1175,15 +1175,15 @@ impl From> for Payload { #[cfg(feature = "shared-memory")] impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } #[cfg(feature = "shared-memory")] -impl From for Payload { +impl From for ZBytes { fn from(t: SharedMemoryBuf) -> Self { ZSerde.serialize(t) } @@ -1193,7 +1193,7 @@ impl From for Payload { impl Deserialize<'_, SharedMemoryBuf> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { // A SharedMemoryBuf is expected to have only one 
slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { @@ -1206,10 +1206,10 @@ impl Deserialize<'_, SharedMemoryBuf> for ZSerde { } #[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { +impl TryFrom for SharedMemoryBuf { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } @@ -1222,8 +1222,8 @@ macro_rules! impl_tuple { let codec = Zenoh080::new(); let mut buffer: ZBuf = ZBuf::empty(); let mut writer = buffer.writer(); - let apld: Payload = a.into(); - let bpld: Payload = b.into(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); // SAFETY: we are serializing slices on a ZBuf, so serialization will never // fail unless we run out of memory. In that case, Rust memory allocator @@ -1233,15 +1233,15 @@ macro_rules! impl_tuple { codec.write(&mut writer, &bpld.0).unwrap_unchecked(); } - Payload::new(buffer) + ZBytes::new(buffer) }}; } impl Serialize<(A, B)> for ZSerde where - A: Into, - B: Into, + A: Into, + B: Into, { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: (A, B)) -> Self::Output { impl_tuple!(t) @@ -1250,20 +1250,20 @@ where impl Serialize<&(A, B)> for ZSerde where - for<'a> &'a A: Into, - for<'b> &'b B: Into, + for<'a> &'a A: Into, + for<'b> &'b B: Into, { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &(A, B)) -> Self::Output { impl_tuple!(t) } } -impl From<(A, B)> for Payload +impl From<(A, B)> for ZBytes where - A: Into, - B: Into, + A: Into, + B: Into, { fn from(value: (A, B)) -> Self { ZSerde.serialize(value) @@ -1272,22 +1272,22 @@ where impl Deserialize<'_, (A, B)> for ZSerde where - for<'a> A: TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, + for<'a> A: TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, { type Error = ZError; - fn 
deserialize(self, payload: &Payload) -> Result<(A, B), Self::Error> { + fn deserialize(self, payload: &ZBytes) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = payload.0.reader(); let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let apld = Payload::new(abuf); + let apld = ZBytes::new(abuf); let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let bpld = Payload::new(bbuf); + let bpld = ZBytes::new(bbuf); let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; @@ -1295,30 +1295,30 @@ where } } -impl TryFrom for (A, B) +impl TryFrom for (A, B) where - A: for<'a> TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, + A: for<'a> TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, { type Error = ZError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for (A, B) +impl TryFrom<&ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, + for<'a> A: TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, { type Error = ZError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -1354,8 +1354,8 @@ impl std::fmt::Display for StringOrBase64 { } } -impl From<&Payload> for StringOrBase64 { - fn from(v: &Payload) -> Self { +impl From<&ZBytes> for StringOrBase64 { + fn from(v: &ZBytes) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; match v.deserialize::() { Ok(s) => StringOrBase64::String(s), @@ -1367,7 +1367,7 @@ impl From<&Payload> for StringOrBase64 { mod tests { #[test] fn serializer() { 
- use super::Payload; + use super::ZBytes; use rand::Rng; use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; @@ -1380,7 +1380,7 @@ mod tests { let i = $in; let t = i.clone(); println!("Serialize:\t{:?}", t); - let v = Payload::serialize(t); + let v = ZBytes::serialize(t); println!("Deserialize:\t{:?}", v); let o: $t = v.deserialize().unwrap(); assert_eq!(i, o); @@ -1476,7 +1476,7 @@ mod tests { // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; println!("Serialize:\t{:?}", v); - let p = Payload::from_iter(v.iter()); + let p = ZBytes::from_iter(v.iter()); println!("Deserialize:\t{:?}\n", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); @@ -1484,7 +1484,7 @@ mod tests { let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; println!("Serialize:\t{:?}", v); - let p = Payload::from_iter(v.drain(..)); + let p = ZBytes::from_iter(v.drain(..)); println!("Deserialize:\t{:?}\n", p); let mut iter = p.iter::<[u8; 4]>(); assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); @@ -1498,7 +1498,7 @@ mod tests { hm.insert(0, 0); hm.insert(1, 1); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); @@ -1507,7 +1507,7 @@ mod tests { hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); @@ -1516,7 +1516,7 @@ mod tests { hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); @@ 
-1525,7 +1525,7 @@ mod tests { hm.insert(0, ZSlice::from(vec![0u8; 8])); hm.insert(1, ZSlice::from(vec![1u8; 16])); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); assert_eq!(hm, o); @@ -1534,7 +1534,7 @@ mod tests { hm.insert(0, ZBuf::from(vec![0u8; 8])); hm.insert(1, ZBuf::from(vec![1u8; 16])); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); assert_eq!(hm, o); @@ -1543,7 +1543,7 @@ mod tests { hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); + let p = ZBytes::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); @@ -1552,7 +1552,7 @@ mod tests { hm.insert(String::from("0"), String::from("a")); hm.insert(String::from("1"), String::from("b")); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.iter()); + let p = ZBytes::from_iter(hm.iter()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(String, String)>()); assert_eq!(hm, o); diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs index d9fa725ed5..81dfb04752 100644 --- a/zenoh/src/encoding.rs +++ b/zenoh/src/encoding.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::payload::Payload; +use crate::bytes::ZBytes; use phf::phf_map; use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; @@ -727,7 +727,7 @@ pub trait EncodingMapping { } // Bytes -impl EncodingMapping for Payload { +impl EncodingMapping for ZBytes { 
const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8de143fd8d..7e25375d64 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -134,12 +134,12 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; +pub mod bytes; pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; -pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 09009cabd7..c7e951a963 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,10 +12,10 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; +use crate::bytes::ZBytes; use crate::encoding::Encoding; use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; @@ -580,7 +580,7 @@ fn router_data(context: &AdminContext, query: Query) { } tracing::trace!("AdminSpace router_data: {:?}", json); - let payload = match Payload::try_from(json) { + let payload = match ZBytes::try_from(json) { Ok(p) => p, Err(e) => { tracing::error!("Error serializing AdminSpace reply: {:?}", e); @@ -664,7 +664,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(key, Payload::empty()).res() { + if let Err(e) = query.reply(key, ZBytes::empty()).res() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -681,7 +681,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(key, Payload::empty()).res() { + if 
let Err(e) = query.reply(key, ZBytes::empty()).res() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -699,7 +699,7 @@ fn plugins_data(context: &AdminContext, query: Query) { tracing::debug!("plugin status: {:?}", status); let key = root_key.join(status.name()).unwrap(); let status = serde_json::to_value(status).unwrap(); - match Payload::try_from(status) { + match ZBytes::try_from(status) { Ok(zbuf) => { if let Err(e) = query.reply(key, zbuf).res_sync() { tracing::error!("Error sending AdminSpace reply: {:?}", e); @@ -744,7 +744,7 @@ fn plugins_status(context: &AdminContext, query: Query) { Ok(Ok(responses)) => { for response in responses { if let Ok(key_expr) = KeyExpr::try_from(response.key) { - match Payload::try_from(response.value) { + match ZBytes::try_from(response.value) { Ok(zbuf) => { if let Err(e) = query.reply(key_expr, zbuf).res_sync() { tracing::error!("Error sending AdminSpace reply: {:?}", e); diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 9158425034..2bb14fe2a4 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,9 +43,9 @@ pub(crate) mod common { pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. 
- pub use crate::payload::{Deserialize, Payload, Serialize}; + pub use crate::bytes::{Deserialize, Serialize, ZBytes}; + pub use crate::encoding::Encoding; pub use crate::value::Value; #[zenoh_macros::unstable] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 303f120360..8dd5883f0f 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -20,8 +20,8 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ + bytes::OptionZBytes, handlers::{Callback, DefaultHandler, IntoHandler}, - payload::OptionPayload, sample::Attachment, Id, }; @@ -40,7 +40,7 @@ pub use zenoh_protocol::core::CongestionControl; #[derive(Debug, Clone)] pub struct PublicationBuilderPut { - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, } #[derive(Debug, Clone)] @@ -138,7 +138,7 @@ impl

ValueBuilderTrait for PublicationBuilder { fn payload(self, payload: IntoPayload) -> Self where - IntoPayload: Into, + IntoPayload: Into, { Self { kind: PublicationBuilderPut { @@ -167,8 +167,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>(self, attachment: TA) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -213,7 +213,7 @@ impl SyncResolve for PublicationBuilder, PublicationBui let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, @@ -421,7 +421,7 @@ impl<'a> Publisher<'a> { #[inline] pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> where - IntoPayload: Into, + IntoPayload: Into, { PublicationBuilder { publisher: self, @@ -708,7 +708,7 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete fn res_sync(self) -> ::To { resolve_put( self.publisher, - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, @@ -941,7 +941,7 @@ impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { fn resolve_put( publisher: &Publisher<'_>, - payload: Payload, + payload: ZBytes, kind: SampleKind, encoding: Encoding, timestamp: Option, diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index becfad4922..a0c2e3cfbb 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -18,7 +18,7 @@ use crate::prelude::*; use crate::sample::QoSBuilder; use crate::Session; #[cfg(feature = "unstable")] -use crate::{payload::OptionPayload, sample::Attachment}; +use crate::{bytes::OptionZBytes, sample::Attachment}; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; @@ -165,8 +165,8 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { 
} #[cfg(feature = "unstable")] - fn attachment>(self, attachment: T) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: T) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -201,7 +201,7 @@ impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { let mut value = self.value.unwrap_or_default(); value.payload = payload.into(); Self { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 755e0364af..8147eb2885 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -26,7 +26,7 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - payload::OptionPayload, + bytes::OptionZBytes, query::ReplyKeyExpr, sample::{Attachment, SourceInfo}, }; @@ -105,7 +105,7 @@ impl Query { /// This Query's payload. #[inline(always)] - pub fn payload(&self) -> Option<&Payload> { + pub fn payload(&self) -> Option<&ZBytes> { self.inner.value.as_ref().map(|v| &v.payload) } @@ -150,7 +150,7 @@ impl Query { where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoPayload: Into, { ReplyBuilder { query: self, @@ -275,7 +275,7 @@ impl AsyncResolve for ReplySample<'_> { #[derive(Debug)] pub struct ReplyBuilderPut { - payload: super::Payload, + payload: super::ZBytes, encoding: super::Encoding, } #[derive(Debug)] @@ -314,8 +314,8 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] - fn attachment>(self, attachment: U) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -359,7 +359,7 @@ impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { } } 
- fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { kind: ReplyBuilderPut { payload: payload.into(), @@ -501,7 +501,7 @@ impl ValueBuilderTrait for ReplyErrBuilder<'_> { Self { value, ..self } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { let mut value = self.value.clone(); value.payload = payload.into(); Self { value, ..self } diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 6dc85c4046..cab5c2333a 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -14,13 +14,13 @@ use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; -use crate::Payload; use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Value; +use crate::ZBytes; #[cfg(feature = "unstable")] -use crate::{payload::OptionPayload, sample::SourceInfo}; +use crate::{bytes::OptionZBytes, sample::SourceInfo}; use std::marker::PhantomData; use uhlc::Timestamp; use zenoh_core::zresult; @@ -49,14 +49,14 @@ pub trait SampleBuilderTrait { fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format #[zenoh_macros::unstable] - fn attachment>(self, attachment: T) -> Self; + fn attachment>(self, attachment: T) -> Self; } pub trait ValueBuilderTrait { /// Set the [`Encoding`] fn encoding>(self, encoding: T) -> Self; /// Sets the payload - fn payload>(self, payload: T) -> Self; + fn payload>(self, payload: T) -> Self; /// Sets both payload and encoding at once. 
/// This is convenient for passing user type which supports `Into` when both payload and encoding depends on user type fn value>(self, value: T) -> Self; @@ -82,7 +82,7 @@ impl SampleBuilder { ) -> SampleBuilder where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoPayload: Into, { Self { sample: Sample { @@ -110,7 +110,7 @@ impl SampleBuilder { Self { sample: Sample { key_expr: key_expr.into(), - payload: Payload::empty(), + payload: ZBytes::empty(), kind: SampleKind::Delete, encoding: Encoding::default(), timestamp: None, @@ -175,8 +175,8 @@ impl SampleBuilderTrait for SampleBuilder { } #[zenoh_macros::unstable] - fn attachment>(self, attachment: U) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { sample: Sample { attachment: attachment.into(), @@ -224,7 +224,7 @@ impl ValueBuilderTrait for SampleBuilder { _t: PhantomData::, } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { sample: Sample { payload: payload.into(), diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index b8fc62be57..b1093847bb 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -13,8 +13,8 @@ // //! 
Sample primitives +use crate::bytes::ZBytes; use crate::encoding::Encoding; -use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; use crate::sample::builder::QoSBuilderTrait; use crate::time::Timestamp; @@ -67,7 +67,7 @@ pub(crate) trait DataInfoIntoSample { ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into; + IntoPayload: Into; } impl DataInfoIntoSample for DataInfo { @@ -84,7 +84,7 @@ impl DataInfoIntoSample for DataInfo { ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoPayload: Into, { Sample { key_expr: key_expr.into(), @@ -114,7 +114,7 @@ impl DataInfoIntoSample for Option { ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoPayload: Into, { if let Some(data_info) = self { data_info.into_sample( @@ -213,12 +213,12 @@ impl From> for SourceInfo { mod attachment { #[cfg(feature = "unstable")] - use crate::payload::Payload; + use crate::bytes::ZBytes; #[cfg(feature = "unstable")] use zenoh_protocol::zenoh::ext::AttachmentType; #[zenoh_macros::unstable] - pub type Attachment = Payload; + pub type Attachment = ZBytes; #[zenoh_macros::unstable] impl From for AttachmentType { @@ -274,7 +274,7 @@ pub use attachment::Attachment; /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. pub struct SampleFields { pub key_expr: KeyExpr<'static>, - pub payload: Payload, + pub payload: ZBytes, pub kind: SampleKind, pub encoding: Encoding, pub timestamp: Option, @@ -311,7 +311,7 @@ impl From for SampleFields { #[derive(Clone, Debug)] pub struct Sample { pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) kind: SampleKind, pub(crate) encoding: Encoding, pub(crate) timestamp: Option, @@ -333,7 +333,7 @@ impl Sample { /// Gets the payload of this Sample. 
#[inline] - pub fn payload(&self) -> &Payload { + pub fn payload(&self) -> &ZBytes { &self.payload } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 6f047fda5d..b075de042b 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use crate::admin; +use crate::bytes::ZBytes; use crate::config::Config; use crate::config::Notifier; use crate::encoding::Encoding; @@ -23,7 +24,6 @@ use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; -use crate::payload::Payload; use crate::prelude::KeyExpr; use crate::prelude::Locality; use crate::publication::*; @@ -715,7 +715,7 @@ impl Session { where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoPayload: Into, { PublicationBuilder { publisher: self.declare_publisher(key_expr), diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index d1b582111a..26165334eb 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,13 +13,13 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload}; +use crate::{bytes::ZBytes, encoding::Encoding}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, } @@ -27,7 +27,7 @@ impl Value { /// Creates a new [`Value`] with specified [`Payload`] and [`Encoding`]. pub fn new(payload: T, encoding: E) -> Self where - T: Into, + T: Into, E: Into, { Value { @@ -38,7 +38,7 @@ impl Value { /// Creates an empty [`Value`]. pub const fn empty() -> Self { Value { - payload: Payload::empty(), + payload: ZBytes::empty(), encoding: Encoding::default(), } } @@ -49,7 +49,7 @@ impl Value { } /// Gets binary [`Payload`] of this [`Value`]. 
- pub fn payload(&self) -> &Payload { + pub fn payload(&self) -> &ZBytes { &self.payload } @@ -61,7 +61,7 @@ impl Value { impl From for Value where - T: Into, + T: Into, { fn from(t: T) -> Self { Value { From e0d7784a7467481c921da46581bdf14d077144f0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:27:27 +0200 Subject: [PATCH 257/598] Remove Attachment in favor of ZBYtes --- zenoh-ext/src/group.rs | 6 +++--- zenoh/src/bytes.rs | 16 ++++++++++++++- zenoh/src/publication.rs | 7 +++---- zenoh/src/query.rs | 6 +++--- zenoh/src/queryable.rs | 10 +++++----- zenoh/src/sample/mod.rs | 41 ++++++-------------------------------- zenoh/src/session.rs | 10 ++++------ zenoh/tests/attachments.rs | 12 +++++------ 8 files changed, 45 insertions(+), 63 deletions(-) diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 83b3c7b199..839623cdb9 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,7 +24,7 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::bytes::PayloadReader; +use zenoh::bytes::ZBytesReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -242,7 +242,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize_from::(s.payload().reader()) { + match bincode::deserialize_from::(s.payload().reader()) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { tracing::debug!("Member join: {:?}", &je.member); @@ -301,7 +301,7 @@ async fn net_event_handler(z: Arc, state: Arc) { while let Ok(reply) = receiver.recv_async().await { match reply.result() { Ok(sample) => { - match bincode::deserialize_from::( + match bincode::deserialize_from::( sample.payload().reader(), ) { Ok(m) => { diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 91bae8f517..9e2e441f3a 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -28,7 
+28,7 @@ use zenoh_buffers::{ ZBufReader, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::core::Properties; +use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -1364,6 +1364,20 @@ impl From<&ZBytes> for StringOrBase64 { } } +impl From for AttachmentType { + fn from(this: ZBytes) -> Self { + AttachmentType { + buffer: this.into(), + } + } +} + +impl From> for ZBytes { + fn from(this: AttachmentType) -> Self { + this.buffer.into() + } +} + mod tests { #[test] fn serializer() { diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 8dd5883f0f..c59cca8b9e 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -20,9 +20,8 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - bytes::OptionZBytes, + bytes::{OptionZBytes, ZBytes}, handlers::{Callback, DefaultHandler, IntoHandler}, - sample::Attachment, Id, }; use std::future::Ready; @@ -76,7 +75,7 @@ pub struct PublicationBuilder { #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } pub type SessionPutBuilder<'a, 'b> = @@ -946,7 +945,7 @@ fn resolve_put( encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { tracing::trace!("write({:?}, [...])", &publisher.key_expr); let primitives = zread!(publisher.session.state) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index a0c2e3cfbb..db7071c278 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,12 +13,12 @@ // //! Query primitives. 
+#[cfg(feature = "unstable")] +use crate::bytes::{OptionZBytes, ZBytes}; use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; use crate::sample::QoSBuilder; use crate::Session; -#[cfg(feature = "unstable")] -use crate::{bytes::OptionZBytes, sample::Attachment}; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; @@ -149,7 +149,7 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) handler: Handler, pub(crate) value: Option, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 8147eb2885..563df461b8 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -26,9 +26,9 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - bytes::OptionZBytes, + bytes::{OptionZBytes, ZBytes}, query::ReplyKeyExpr, - sample::{Attachment, SourceInfo}, + sample::SourceInfo, }; use std::fmt; use std::future::Ready; @@ -55,7 +55,7 @@ pub(crate) struct QueryInner { pub(crate) zid: ZenohId, pub(crate) primitives: Arc, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -116,7 +116,7 @@ impl Query { } #[zenoh_macros::unstable] - pub fn attachment(&self) -> Option<&Attachment> { + pub fn attachment(&self) -> Option<&ZBytes> { self.inner.attachment.as_ref() } @@ -295,7 +295,7 @@ pub struct ReplyBuilder<'a, 'b, T> { source_info: SourceInfo, #[cfg(feature = "unstable")] - attachment: Option, + attachment: Option, } pub type ReplyPutBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderPut>; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index b1093847bb..6078a5a350 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -63,7 +63,7 @@ pub(crate) trait DataInfoIntoSample { self, key_expr: IntoKeyExpr, payload: 
IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -80,7 +80,7 @@ impl DataInfoIntoSample for DataInfo { self, key_expr: IntoKeyExpr, payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -110,7 +110,7 @@ impl DataInfoIntoSample for Option { self, key_expr: IntoKeyExpr, payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -211,32 +211,6 @@ impl From> for SourceInfo { } } -mod attachment { - #[cfg(feature = "unstable")] - use crate::bytes::ZBytes; - #[cfg(feature = "unstable")] - use zenoh_protocol::zenoh::ext::AttachmentType; - - #[zenoh_macros::unstable] - pub type Attachment = ZBytes; - - #[zenoh_macros::unstable] - impl From for AttachmentType { - fn from(this: Attachment) -> Self { - AttachmentType { - buffer: this.into(), - } - } - } - - #[zenoh_macros::unstable] - impl From> for Attachment { - fn from(this: AttachmentType) -> Self { - this.buffer.into() - } - } -} - /// The kind of a `Sample`. #[repr(u8)] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] @@ -268,9 +242,6 @@ impl TryFrom for SampleKind { } } -#[zenoh_macros::unstable] -pub use attachment::Attachment; - /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. 
pub struct SampleFields { pub key_expr: KeyExpr<'static>, @@ -284,7 +255,7 @@ pub struct SampleFields { #[cfg(feature = "unstable")] pub source_info: SourceInfo, #[cfg(feature = "unstable")] - pub attachment: Option, + pub attachment: Option, } impl From for SampleFields { @@ -321,7 +292,7 @@ pub struct Sample { pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } impl Sample { @@ -371,7 +342,7 @@ impl Sample { /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. #[zenoh_macros::unstable] #[inline] - pub fn attachment(&self) -> Option<&Attachment> { + pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index b075de042b..4a6a312dcf 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -29,8 +29,6 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -#[cfg(feature = "unstable")] -use crate::sample::Attachment; use crate::sample::DataInfo; use crate::sample::DataInfoIntoSample; use crate::sample::QoS; @@ -1482,7 +1480,7 @@ impl Session { key_expr: &WireExpr, info: Option, payload: ZBuf, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) { let mut callbacks = SingleOrVec::default(); let state = zread!(self.state); @@ -1613,7 +1611,7 @@ impl Session { destination: Locality, timeout: Duration, value: Option, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { @@ -1755,7 +1753,7 @@ impl Session { _target: TargetType, _consolidation: Consolidation, body: Option, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) { let (primitives, key_expr, queryables) 
= { let state = zread!(self.state); @@ -2240,7 +2238,7 @@ impl Primitives for Session { payload: ZBuf, info: DataInfo, #[cfg(feature = "unstable")] - attachment: Option, + attachment: Option, } let Ret { payload, diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index df9ebcca2e..975103b8c9 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,8 +14,8 @@ #[cfg(feature = "unstable")] #[test] fn attachment_pubsub() { + use zenoh::bytes::ZBytes; use zenoh::prelude::sync::*; - use zenoh::sample::Attachment; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -44,12 +44,12 @@ fn attachment_pubsub() { zenoh .put("test/attachment", "put") - .attachment(Attachment::from_iter(backer.iter())) + .attachment(ZBytes::from_iter(backer.iter())) .res() .unwrap(); publisher .put("publisher") - .attachment(Attachment::from_iter(backer.iter())) + .attachment(ZBytes::from_iter(backer.iter())) .res() .unwrap(); } @@ -58,7 +58,7 @@ fn attachment_pubsub() { #[cfg(feature = "unstable")] #[test] fn attachment_queries() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; + use zenoh::{bytes::ZBytes, prelude::sync::*, sample::builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -84,7 +84,7 @@ fn attachment_queries() { query.key_expr().clone(), query.value().unwrap().payload().clone(), ) - .attachment(Attachment::from_iter( + .attachment(ZBytes::from_iter( attachment .iter::<( [u8; std::mem::size_of::()], @@ -109,7 +109,7 @@ fn attachment_queries() { let get = zenoh .get("test/attachment") .payload("query") - .attachment(Attachment::from_iter(backer.iter())) + .attachment(ZBytes::from_iter(backer.iter())) .res() .unwrap(); while let Ok(reply) = get.recv() { From 89d6a2df6c822e1d05a9ea5e2b15d567a6418402 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:49:07 +0200 Subject: [PATCH 258/598] Fix ZBytes doc --- 
zenoh/src/bytes.rs | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 9e2e441f3a..f4fd882467 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -48,13 +48,13 @@ pub trait Deserialize<'a, T> { fn deserialize(self, t: &'a ZBytes) -> Result; } -/// A payload contains the serialized bytes of user data. +/// ZBytes contains the serialized bytes of user data. #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct ZBytes(ZBuf); impl ZBytes { - /// Create an empty payload. + /// Create an empty ZBytes. pub const fn empty() -> Self { Self(ZBuf::empty()) } @@ -67,12 +67,12 @@ impl ZBytes { Self(t.into()) } - /// Returns wether the payload is empty or not. + /// Returns wether the ZBytes is empty or not. pub fn is_empty(&self) -> bool { self.0.is_empty() } - /// Returns the length of the payload. + /// Returns the length of the ZBytes. pub fn len(&self) -> usize { self.0.len() } @@ -113,11 +113,11 @@ impl ZBytes { /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust - /// use zenoh::payload::ZBytes; + /// use zenoh::bytes::ZBytes; /// /// let start = String::from("abc"); - /// let payload = ZBytes::serialize(start.clone()); - /// let end: String = payload.deserialize().unwrap(); + /// let bytes = ZBytes::serialize(start.clone()); + /// let end: String = bytes.deserialize().unwrap(); /// assert_eq!(start, end); /// ``` pub fn serialize(t: T) -> Self @@ -276,7 +276,7 @@ impl From for Option { } } -/// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. +/// The default serializer for ZBytes. It supports primitives types, such as: Vec, int, uint, float, string, bool. /// It also supports common Rust serde values. 
#[derive(Clone, Copy, Debug)] pub struct ZSerde; @@ -921,9 +921,9 @@ impl Serialize<&serde_json::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut payload = ZBytes::empty(); - serde_json::to_writer(payload.writer(), t)?; - Ok(payload) + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) } } @@ -980,9 +980,9 @@ impl Serialize<&serde_yaml::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = ZBytes::empty(); - serde_yaml::to_writer(payload.writer(), t)?; - Ok(payload) + let mut bytes = ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) } } @@ -1039,9 +1039,9 @@ impl Serialize<&serde_cbor::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = ZBytes::empty(); - serde_cbor::to_writer(payload.0.writer(), t)?; - Ok(payload) + let mut bytes = ZBytes::empty(); + serde_cbor::to_writer(bytes.0.writer(), t)?; + Ok(bytes) } } @@ -1098,13 +1098,13 @@ impl Serialize<&serde_pickle::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut payload = ZBytes::empty(); + let mut bytes = ZBytes::empty(); serde_pickle::value_to_writer( - &mut payload.0.writer(), + &mut bytes.0.writer(), t, serde_pickle::SerOptions::default(), )?; - Ok(payload) + Ok(bytes) } } @@ -1279,9 +1279,9 @@ where { type Error = ZError; - fn deserialize(self, payload: &ZBytes) -> Result<(A, B), Self::Error> { + fn deserialize(self, bytes: &ZBytes) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); - let mut reader = payload.0.reader(); + let mut reader = bytes.0.reader(); let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let apld = ZBytes::new(abuf); From 459318430d49806c8a4016ea16337d785b2bee52 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 
11:23:45 +0200 Subject: [PATCH 259/598] Fix unstable feature selector test --- zenoh/src/selector.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 15ce36faa8..2c7fc2d782 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -330,7 +330,6 @@ impl<'a> From> for Selector<'a> { fn selector_accessors() { use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; - let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); for selector in [ "hello/there?_timetrick", "hello/there?_timetrick;_time", @@ -346,12 +345,25 @@ fn selector_accessors() { assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); - selector.parameters_mut().set_time_range(time_range); + let time_range = "[now(-2s)..now(2s)]"; + zcondfeat!( + "unstable", + { + let time_range = time_range.parse().unwrap(); + selector.parameters_mut().set_time_range(time_range); + assert_eq!( + selector.parameters().time_range().unwrap().unwrap(), + time_range + ); + }, + { + selector.parameters_mut().insert(TIME_RANGE_KEY, time_range); + } + ); assert_eq!( - selector.parameters().time_range().unwrap().unwrap(), + selector.parameters().get(TIME_RANGE_KEY).unwrap(), time_range ); - assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); assert!(hm.contains_key(TIME_RANGE_KEY)); From 5b0bdaa97fa13d75dfaf7ef603053d0e140d75c0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 11:24:14 +0200 Subject: [PATCH 260/598] Fix serialization of 0 integers --- zenoh/src/bytes.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index f4fd882467..56046f9e2a 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -696,10 +696,9 @@ macro_rules! 
impl_int { fn serialize(self, t: $t) -> Self::Output { let bs = t.to_le_bytes(); - let end = if t == 0 as $t { - 0 - } else { - 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1) + let mut end = 1; + if t != 0 as $t { + end += bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); }; // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 @@ -1223,7 +1222,10 @@ macro_rules! impl_tuple { let mut buffer: ZBuf = ZBuf::empty(); let mut writer = buffer.writer(); let apld: ZBytes = a.into(); + println!("Write A: {:?}", apld.0); + let bpld: ZBytes = b.into(); + println!("Write B: {:?}", bpld.0); // SAFETY: we are serializing slices on a ZBuf, so serialization will never // fail unless we run out of memory. In that case, Rust memory allocator From 3f770295f68b61d73fe84cbf47e920587172564f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 12:05:00 +0200 Subject: [PATCH 261/598] Remove forgotten println --- zenoh/src/bytes.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 56046f9e2a..6dce95980f 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -1222,10 +1222,7 @@ macro_rules! impl_tuple { let mut buffer: ZBuf = ZBuf::empty(); let mut writer = buffer.writer(); let apld: ZBytes = a.into(); - println!("Write A: {:?}", apld.0); - let bpld: ZBytes = b.into(); - println!("Write B: {:?}", bpld.0); // SAFETY: we are serializing slices on a ZBuf, so serialization will never // fail unless we run out of memory. 
In that case, Rust memory allocator From d9a5ae671bf33ef6a1bda93a16f872ba3d86ed64 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 12:18:51 +0200 Subject: [PATCH 262/598] Sort use --- zenoh/src/bytes.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 6dce95980f..714e1c3ff1 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -20,12 +20,11 @@ use std::{ string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; -use zenoh_buffers::ZBufWriter; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::HasWriter, - ZBufReader, ZSlice, + ZBufReader, ZBufWriter, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; From 2daee35420a67b7bde70e491dfaf504496913d3c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 12:22:33 +0200 Subject: [PATCH 263/598] Sort use --- zenoh/src/bytes.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 714e1c3ff1..6f8ba23a65 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -14,9 +14,8 @@ //! ZBytes primitives. 
use crate::buffers::ZBuf; -use std::str::Utf8Error; use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; From 09f9529e6b09a3384c9a1cb7bde37e94f81bc82d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 18:44:14 +0200 Subject: [PATCH 264/598] removed unused EncodingBuilder --- zenoh/src/api/encoding.rs | 2 -- zenoh/src/lib.rs | 1 - 2 files changed, 3 deletions(-) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 3283ec1a84..7518671eed 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -848,5 +848,3 @@ impl EncodingMapping for Box { impl EncodingMapping for SharedMemoryBuf { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } - -pub struct EncodingBuilder(Encoding); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8f7645a965..655a2699bd 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -224,7 +224,6 @@ pub mod value { /// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; - pub use crate::api::encoding::EncodingBuilder; } /// Payload primitives From 6a05bcb18e68d8cd30fd618e3b9d9793995952a2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 21:46:38 +0200 Subject: [PATCH 265/598] compiation fix --- examples/examples/z_sub_shm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 630876f287..f89df5ee60 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -20,7 +20,7 @@ use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, key_expr) = parse_args(); From 18e60a32124303f973acc31d707bce0b323fce33 Mon Sep 17 00:00:00 2001 From: 
Michael Ilyin Date: Wed, 17 Apr 2024 21:59:27 +0200 Subject: [PATCH 266/598] compile fix --- zenoh/src/api/session.rs | 1 - zenoh/src/net/routing/interceptor/access_control.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 412b4a2f6d..9ed85899c3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -31,7 +31,6 @@ use super::{ Id, }; use crate::{ - api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}, }; use std::{ diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 1b0876160a..1467a9baa5 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -23,7 +23,7 @@ use super::{ InterceptorFactoryTrait, InterceptorTrait, }; use crate::net::routing::RoutingContext; -use crate::KeyExpr; +use crate::api::key_expr::KeyExpr; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; From f96c6146b47f9676911fdeaea57ff519fcf1eb86 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 22:11:23 +0200 Subject: [PATCH 267/598] build fix --- io/zenoh-transport/tests/unicast_time.rs | 16 ++++++++-------- zenoh/src/api/session.rs | 5 ++--- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index baebb5a95f..088db05049 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -230,7 +230,7 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = 
format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -239,7 +239,7 @@ async fn time_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -248,7 +248,7 @@ async fn time_tcp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -257,7 +257,7 @@ async fn time_udp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -266,7 +266,7 @@ async fn time_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -275,7 +275,7 @@ async fn time_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -304,7 +304,7 @@ async fn 
time_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -318,7 +318,7 @@ async fn time_unix_only() { #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 9ed85899c3..56a1288f40 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -22,6 +22,7 @@ use super::{ key_expr::{KeyExpr, KeyExprInner}, payload::Payload, publication::Priority, + query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, @@ -30,9 +31,7 @@ use super::{ value::Value, Id, }; -use crate::{ - net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}, -}; +use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, From fc835434d212750fac6d08771a81a830eecb6a52 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 22:16:55 +0200 Subject: [PATCH 268/598] cargo fmt --- zenoh/src/net/routing/interceptor/access_control.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 
1467a9baa5..b23db9765e 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -22,8 +22,8 @@ use super::{ authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, InterceptorFactoryTrait, InterceptorTrait, }; -use crate::net::routing::RoutingContext; use crate::api::key_expr::KeyExpr; +use crate::net::routing::RoutingContext; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; From a12d2c80290fbd649129c3c3768eb7a84cc1c64e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 18 Apr 2024 12:10:35 +0200 Subject: [PATCH 269/598] Fix build --- examples/examples/z_sub_shm.rs | 2 +- io/zenoh-transport/tests/unicast_time.rs | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index f89df5ee60..d304d6a7f6 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -20,7 +20,7 @@ use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let (mut config, key_expr) = parse_args(); diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index baebb5a95f..5b9209ada3 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -230,7 +230,7 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -239,7 +239,7 @@ async fn time_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] 
#[ignore] async fn time_tcp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -248,7 +248,7 @@ async fn time_tcp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -257,7 +257,7 @@ async fn time_udp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -266,7 +266,7 @@ async fn time_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -275,7 +275,7 @@ async fn time_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -284,7 +284,7 @@ async fn time_ws_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = 
"unixpipe/time_unixpipe_only".parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -293,7 +293,7 @@ async fn time_unixpipe_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); @@ -304,7 +304,7 @@ async fn time_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -318,7 +318,7 @@ async fn time_unix_only() { #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. 
The certificate and key @@ -514,7 +514,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_vsock_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); time_lowlatency_transport(&endpoint).await; } From b6cc94577fdff9e3c8d8c51eafc777928cdb77bd Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:19:59 +0200 Subject: [PATCH 270/598] build fix unifished --- Cargo.lock | 2 -- plugins/zenoh-plugin-rest/Cargo.toml | 1 - plugins/zenoh-plugin-rest/src/lib.rs | 7 +++--- .../zenoh-plugin-storage-manager/Cargo.toml | 1 - .../zenoh-plugin-storage-manager/src/lib.rs | 5 +++-- zenoh-ext/src/group.rs | 1 + zenoh/src/api/admin.rs | 2 +- zenoh/src/api/builders/publication.rs | 22 +++++++++---------- zenoh/src/api/builders/sample.rs | 6 ++--- zenoh/src/api/{payload.rs => bytes.rs} | 3 --- zenoh/src/api/mod.rs | 2 +- zenoh/src/api/publication.rs | 8 +++---- zenoh/src/api/query.rs | 8 ++----- zenoh/src/api/queryable.rs | 12 +++++----- zenoh/src/api/sample.rs | 8 +------ zenoh/src/api/session.rs | 10 +++++---- zenoh/src/lib.rs | 17 +++++++------- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 4 ++-- 19 files changed, 52 insertions(+), 69 deletions(-) rename zenoh/src/api/{payload.rs => bytes.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index c2bbd6a6db..7653db324a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5447,7 +5447,6 @@ dependencies = [ "zenoh", "zenoh-plugin-trait", "zenoh-result", - "zenoh-util", ] [[package]] @@ -5473,7 +5472,6 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", - "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 19fa9eafdc..05f010bdb8 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -36,7 +36,6 
@@ anyhow = { workspace = true, features = ["default"] } async-std = { workspace = true, features = ["default", "attributes"] } base64 = { workspace = true } const_format = { workspace = true } -zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index fb5aa96a99..7b51f2054b 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,11 +29,10 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; -use zenoh::core::AsyncResolve; +use zenoh::bytes::{StringOrBase64, ZBytes}; +use zenoh::core::{try_init_log_from_env, AsyncResolve}; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::payload::{Payload, StringOrBase64}; -use zenoh::bytes::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; @@ -245,7 +244,7 @@ impl Plugin for RestPlugin { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 5d479d04c6..9486ab5367 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -37,7 +37,6 @@ async-trait = { workspace = true } crc = { workspace = true } const_format = { workspace = true } derive-new = { workspace = true } -zenoh-util = { workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index ac3f009ab9..a87a6194cb 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -22,6 +22,7 @@ use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; +use zenoh::core::try_init_log_from_env; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; @@ -68,7 +69,7 @@ impl Plugin for StoragesPlugin { type Instance = zenoh::plugins::RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { - zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; @@ -101,7 +102,7 @@ impl StorageRuntimeInner { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); let PluginConfig { name, backend_search_dirs, diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 6d4f688327..7528dcbdb9 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use zenoh::prelude::r#async::*; +use zenoh::internal::{bail, Condition, TaskController}; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 7055eb63da..c221d7f27c 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // use super::{ + bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - payload::Payload, queryable::Query, sample::Locality, sample::{DataInfo, SampleKind}, diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index c710d0ad79..639ae4ed37 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -15,19 +15,19 @@ use std::future::Ready; // use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; -use crate::api::key_expr::KeyExpr; #[cfg(feature = "unstable")] -use crate::api::payload::OptionPayload; +use crate::api::bytes::OptionZBytes; +use crate::api::bytes::ZBytes; +use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; #[cfg(feature = "unstable")] -use crate::api::sample::Attachment; use crate::api::sample::Locality; use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::value::Value; -use crate::api::{encoding::Encoding, payload::Payload, publication::Publisher}; +use crate::api::{encoding::Encoding, publication::Publisher}; use 
zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::Mapping; @@ -45,7 +45,7 @@ pub type PublisherDeleteBuilder<'a> = #[derive(Debug, Clone)] pub struct PublicationBuilderPut { - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, } #[derive(Debug, Clone)] @@ -79,7 +79,7 @@ pub struct PublicationBuilder { #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } impl QoSBuilderTrait for PublicationBuilder, T> { @@ -130,7 +130,7 @@ impl

ValueBuilderTrait for PublicationBuilder { fn payload(self, payload: IntoPayload) -> Self where - IntoPayload: Into, + IntoPayload: Into, { Self { kind: PublicationBuilderPut { @@ -158,8 +158,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>(self, attachment: TA) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -202,7 +202,7 @@ impl SyncResolve for PublicationBuilder, PublicationBui fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; publisher.resolve_put( - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, @@ -407,7 +407,7 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { fn res_sync(self) -> ::To { self.publisher.resolve_put( - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 420a150509..ccf10d9574 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -11,10 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::marker::PhantomData; +use crate::api::bytes::ZBytes; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; use crate::api::publication::Priority; use crate::api::sample::QoS; use crate::api::sample::QoSBuilder; @@ -22,7 +21,8 @@ use crate::api::sample::Sample; use crate::api::sample::SampleKind; use crate::api::value::Value; #[cfg(feature = "unstable")] -use crate::{api::payload::OptionPayload, sample::SourceInfo}; +use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; +use std::marker::PhantomData; use 
uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/payload.rs b/zenoh/src/api/bytes.rs similarity index 99% rename from zenoh/src/api/payload.rs rename to zenoh/src/api/bytes.rs index d9e6c1afdd..6f8ba23a65 100644 --- a/zenoh/src/api/payload.rs +++ b/zenoh/src/api/bytes.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // -<<<<<<<< HEAD:zenoh/src/api/payload.rs -======== //! ZBytes primitives. ->>>>>>>> protocol_changes:zenoh/src/bytes.rs use crate::buffers::ZBuf; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index ab38844ea6..c2cc3504f0 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -16,13 +16,13 @@ pub(crate) type Id = u32; pub(crate) mod admin; pub(crate) mod builders; +pub(crate) mod bytes; pub(crate) mod encoding; pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; -pub(crate) mod payload; pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index b8c39a3fb5..22375decd3 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -17,9 +17,9 @@ use super::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, PublisherPutBuilder, }, + bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - payload::Payload, sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, session::{SessionRef, Undeclarable}, }; @@ -43,7 +43,7 @@ use zenoh_result::{Error, ZResult}; #[zenoh_macros::unstable] use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, - crate::api::sample::{Attachment, SourceInfo}, + crate::api::sample::SourceInfo, crate::api::Id, zenoh_protocol::core::EntityGlobalId, zenoh_protocol::core::EntityId, @@ -533,12 +533,12 
@@ impl<'a> Sink for Publisher<'a> { impl Publisher<'_> { pub(crate) fn resolve_put( &self, - payload: Payload, + payload: ZBytes, kind: SampleKind, encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { tracing::trace!("write({:?}, [...])", &self.key_expr); let primitives = zread!(self.session.state) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index dd5d269fa4..164be63b8c 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -14,10 +14,10 @@ use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, + bytes::ZBytes, encoding::Encoding, handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - payload::Payload, publication::Priority, sample::{Locality, QoSBuilder, Sample}, selector::Selector, @@ -31,11 +31,7 @@ use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; #[zenoh_macros::unstable] -use super::{ - builders::sample::SampleBuilderTrait, - payload::OptionPayload, - sample::{Attachment, SourceInfo}, -}; +use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). 
pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3c013fc8d7..2d84530f6b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,10 +13,10 @@ // use super::{ builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, + bytes::ZBytes, encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - payload::Payload, publication::Priority, sample::{Locality, QoSBuilder, Sample, SampleKind}, selector::{Parameters, Selector}, @@ -43,10 +43,8 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { super::{ - builders::sample::SampleBuilderTrait, - payload::OptionPayload, - query::ReplyKeyExpr, - sample::{Attachment, SourceInfo}, + builders::sample::SampleBuilderTrait, bytes::OptionZBytes, query::ReplyKeyExpr, + sample::SourceInfo, }, zenoh_protocol::core::EntityGlobalId, }; @@ -283,8 +281,8 @@ impl AsyncResolve for ReplySample<'_> { #[derive(Debug)] pub struct ReplyBuilderPut { - payload: super::ZBytes, - encoding: super::Encoding, + payload: ZBytes, + encoding: Encoding, } #[derive(Debug)] pub struct ReplyBuilderDelete; diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 3aa03c4392..b9f77a7157 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -14,9 +14,8 @@ //! 
Sample primitives use super::{ - builders::sample::QoSBuilderTrait, encoding::Encoding, key_expr::KeyExpr, payload::Payload, + builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, publication::Priority, value::Value, -bytes::ZBytes }; #[cfg(feature = "unstable")] use serde::Serialize; @@ -26,11 +25,6 @@ use zenoh_protocol::{ network::declare::ext::QoSType, }; -#[zenoh_macros::unstable] -pub use attachment::Attachment; -#[zenoh_macros::unstable] -use serde::Serialize; - pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 01bd3cf9ad..7d6a0c2b66 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -16,14 +16,16 @@ use super::{ builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, }, + bytes::ZBytes, encoding::Encoding, handlers::{Callback, DefaultHandler}, info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, - payload::Payload, publication::Priority, - query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, - query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + query::{ + ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply, + _REPLY_KEY_EXPR_ANY_SEL_PARAM, + }, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, @@ -83,7 +85,7 @@ use super::{ liveliness::{Liveliness, LivelinessTokenState}, publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, - sample::{Attachment, SourceInfo}, + sample::SourceInfo, }; zconfigurable! { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 655a2699bd..054b256ccd 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -122,6 +122,7 @@ pub mod core { /// A zenoh result. 
pub use zenoh_result::ZResult as Result; pub use zenoh_util::core::zresult::ErrNo; + pub use zenoh_util::try_init_log_from_env; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -207,8 +208,6 @@ pub mod sample { pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] - pub use crate::api::sample::Attachment; - #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; @@ -227,13 +226,13 @@ pub mod encoding { } /// Payload primitives -pub mod payload { - pub use crate::api::payload::Deserialize; - pub use crate::api::payload::Payload; - pub use crate::api::payload::PayloadReader; - pub use crate::api::payload::Serialize; - pub use crate::api::payload::StringOrBase64; - pub use crate::api::payload::ZSerde; +pub mod bytes { + pub use crate::api::bytes::Deserialize; + pub use crate::api::bytes::ZBytes; + pub use crate::api::bytes::ZBytesReader; + pub use crate::api::bytes::Serialize; + pub use crate::api::bytes::StringOrBase64; + pub use crate::api::bytes::ZSerde; } /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 630e58f2ca..48665866fe 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,7 +20,7 @@ use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; -use crate::payload::Payload; +use crate::api::bytes::ZBytes; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e89542122d..1f9ccb057c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,7 +43,7 @@ pub(crate) mod flat { pub 
use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; - pub use crate::payload::*; + pub use crate::bytes::*; pub use crate::publication::*; pub use crate::query::*; pub use crate::queryable::*; @@ -66,7 +66,7 @@ pub(crate) mod mods { pub use crate::encoding; pub use crate::handlers; pub use crate::key_expr; - pub use crate::payload; + pub use crate::bytes; pub use crate::publication; pub use crate::query; pub use crate::queryable; From 820e2b51606349d73b3e260469a4ce9af3e68862 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:21:32 +0200 Subject: [PATCH 271/598] build fix --- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 7b51f2054b..7fe591e3f7 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -494,7 +494,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); let zid = runtime.zid().to_string(); let session = zenoh::session::init(runtime).res().await.unwrap(); From dd37f4545f0d035ea6e7bc0112d25fa03858f5fa Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:22:35 +0200 Subject: [PATCH 272/598] cargo fmt --- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/lib.rs | 4 ++-- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index a87a6194cb..77617a487b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -22,12 +22,12 @@ use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; -use zenoh::core::try_init_log_from_env; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::core::try_init_log_from_env; use zenoh::core::Result as ZResult; use zenoh::core::SyncResolve; use zenoh::internal::zlock; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 7528dcbdb9..1bf37f365c 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,8 +24,8 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::prelude::r#async::*; use zenoh::internal::{bail, Condition, TaskController}; +use zenoh::prelude::r#async::*; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 054b256ccd..c3418ecb4c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -228,10 +228,10 @@ pub mod encoding { /// Payload primitives pub mod bytes { pub use crate::api::bytes::Deserialize; - pub use crate::api::bytes::ZBytes; - 
pub use crate::api::bytes::ZBytesReader; pub use crate::api::bytes::Serialize; pub use crate::api::bytes::StringOrBase64; + pub use crate::api::bytes::ZBytes; + pub use crate::api::bytes::ZBytesReader; pub use crate::api::bytes::ZSerde; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 48665866fe..d5e2ca1628 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -13,6 +13,7 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; +use crate::api::bytes::ZBytes; use crate::api::key_expr::KeyExpr; use crate::api::plugins; use crate::api::queryable::Query; @@ -20,7 +21,6 @@ use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; -use crate::api::bytes::ZBytes; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 1f9ccb057c..17286ddeea 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -38,12 +38,12 @@ // Reexport API in flat namespace pub(crate) mod flat { pub use crate::buffers::*; + pub use crate::bytes::*; pub use crate::config::*; pub use crate::core::{Error as ZError, Resolvable, Resolve, Result as ZResult}; pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; - pub use crate::bytes::*; pub use crate::publication::*; pub use crate::query::*; pub use crate::queryable::*; @@ -61,12 +61,12 @@ pub(crate) mod flat { // Reexport API in hierarchical namespace pub(crate) mod mods { pub use crate::buffers; + pub use crate::bytes; pub use crate::config; pub use crate::core; pub use crate::encoding; pub use crate::handlers; pub use crate::key_expr; - pub use crate::bytes; pub use crate::publication; pub use crate::query; pub use crate::queryable; From f527cecd722c5b3590688cec55f84d0d71b95fc8 Mon Sep 17 00:00:00 2001 
From: Michael Ilyin Date: Thu, 18 Apr 2024 12:28:53 +0200 Subject: [PATCH 273/598] clippy fix --- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 4 ++-- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 23d692a554..5e5485d0d2 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -14,7 +14,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::Config; -use zenoh::core::AsyncResolve; +use zenoh::core::{try_init_log_from_env, AsyncResolve}; use zenoh::key_expr::keyexpr; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; @@ -36,7 +36,7 @@ if(typeof(EventSource) !== "undefined") { #[async_std::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 6e579539dd..0678431b7e 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,6 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; +use zenoh::internal::zasync_executor_init; use zenoh::prelude::r#async::*; use zenoh_plugin_trait::Plugin; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 0e29e0b531..72fa62f3ca 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,6 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use 
async_std::task; +use zenoh::internal::zasync_executor_init; use zenoh::prelude::r#async::*; use zenoh_plugin_trait::Plugin; From 43d280f5709886aa83fd4ada9c847ed53a264a4e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 18 Apr 2024 15:29:09 +0200 Subject: [PATCH 274/598] Fix valgrind-check build --- ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 3a5a11a98f..676db8f7d0 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -31,10 +31,11 @@ async fn main() { .declare_queryable(&queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); + let queryable_key_expr = queryable_key_expr.clone(); zenoh_runtime::ZRuntime::Application.block_in_place(async move { query .reply( - queryable_key_expr.clone(), + queryable_key_expr, query.value().unwrap().payload().clone(), ) .res() From 26cb7e925d69dc28b5d0407ee315b999a4f32ec2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 16:25:22 +0200 Subject: [PATCH 275/598] locality under unstable --- zenoh/src/api/builders/publication.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 639ae4ed37..ef2224193f 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -255,6 +255,7 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, + #[cfg(feature = "unstable")] pub(crate) destination: Locality, } From 3711d4568bcc4a54ac0009a6573fe3cd6c5da3ae Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 19 Apr 2024 
15:09:29 +0300 Subject: [PATCH 276/598] SHM subsystem: Rust (#823) * add watchdog codebase * integrated and tested POC * Update shm.rs * WIP: implemented buffer headers in separate SHM segment, made buffer generation support * - refactored POSIX shm wrapper - generation works * - use posix_shm module for data segment in SharedMemoryManager - use numeric ID instead of string ID for SHM buffer identification - this feature speeds up segment lookups and reduces wire overhead - remove unnecessary fields from SharedMemoryManager - fix clippy warnings - added comments * WIP on SHM * Fix clippy, additional improvements * Implement watchdog periodic tasks with optional realtime scheduling * [skip ci] WIP on SHM provider\client API * [skip ci] WIP on SHM API * [skip ci] Big WIP on SHM API integration * [skip ci] working SHM (tests passing) * [skip ci] WIP on API * [skip ci] WIP on SHM API * WIP on API * - added SharedMemoryReader to Session and Runtime building API - support ProtocolID exchange in establish - convert buffer based on supported protocol ids * [skip ci] correct shm establish logic * Remove SharedMemoryFactory * [skip ci] - WIP to brush-up the API, eliminate some API flaws - SHM provider is now conceptually made thread-safe for better flexibility * [skip ci] Final updates to SHM provider API * ZSlice: safe mutable acces to inner contents and support for copy-on-write and it's elision as an optimization for SHM buffers * [skip ci] - added ZSliceMut and it's functionality - documented public SHM APIs - brush-up for SHM API - hide SharedMemoryBuf from API and return ZSlice instead - add is_valid() for ZSlice that checks generation counter on SHM * [skip ci] - ZSliceMut API changed - ZSliceBuffer: different API for shared-memory feature - Hide unnecessary APIs from pub in SharedMemoryReader - Fix default alignment calculation for AllocAlignment - Expose necessary SHM API in zenoh crate - Brush-up examples - Fix shmbuf to rawbuf conversion * [skip ci] - ignore some 
tests because they need to be run globally sequentially - transit shared-memory feature to zenoh-buffers in zenoh-transport * Solved additive feature problem * - remove dependency in zenoh-buffers - make periodic_task compile on win * fix tests * - refine buld system to optimize workspace dependencies - fix posix shm segment size estimation * Update shm.rs * - replace async-std with tokio in zenoh-shm * fix examples * ooops * ignore test with too long SHM segment id for MacOS * lower test complexity to adopt runner capabilities * ignore test with too long SHM segment id for MacOS * - use md5 hashes to shorten SHM segments OS ids (need for MacOS) - enable 128 bit ids in tests * use crc instead of md5 * - get rid of allocator-api2 as work for Allocator is postponed for a while - allow shared-memory feature for macOS and Win in CI (without unixpipe transport) - move deps into workspace - add documentation * move from 'const ID: ProtocolID' to trait interface to support both static (Rust API) and dynamic (other languages API) protocol ID setting * support compile-time and runtime ProtocolID setting * Add more tests to dedicated execution * [skip ci] add more *concurrent tests to dedicated execution * - more SHM API docs - document all SHM API as "unstable" - hide all SHM API behind "unstable" feature - some API brush-up - improve CI for SHM * exclude test_default_features for SHM tests * Move test_helpers.rs into tests to follow the guideline of integration tests. 
* update doc * Eliminate zenoh-buffers -> zenoh-shm dependency to illustarte the problem * fix: Add disabled by default `shared-memory` feature in zenoh-shm * [skip ci] client storage interface fix * [skip ci] fix map method interface for SharedMemoryProvider * brush-up some things after merge * PR review fixes * [skip ci] add shm feature to zenoh-ext * [skip ci] oops * [skip ci] add shared-memory feature traversing for zenoh-shm dependants * rename ZSliceShm to ZSliceShmMut * - fix build - support SharedMemoryClient sharing * [skip ci] - support SharedMemoryClient sharing - add way to build SharedMemoryClientStorage from slice of clients * [skip ci] - add way to build SharedMemoryClientStorage from slice of clients AND default client set * - remove ZSliceMut API * - stabby fixed for 1.72 - build with zenoh's mainline toolchain 1.72 * fix doctests * - ZSliceShm * Support ZSliceShm and ZSliceShmMut in Payload * - optimize ZSlicBuffer trait - add full ZSliceShm support to Payload - add partial ZSliceShmMut support to Payload - remove a lot of unnecessary code * fix code format errors * - SHM buffer API evolution - Payload API for SHM buffers * [skip ci] polish Payload API for SHM * move SHM Buffer API to separate "slice"module * Improve SHM Buffer API concept * Update payload.rs test * fixes after merge * build fixes * - fix recursion error for in SHM buffer API - add som docs - fix Payload test causing stack overflow :) * - implement trait API for SHM buffers - extend SHM buf Payload API test - add missing DerefMut to zsliceshmmut * Fix merge * Rework Serialize trait * Impl &mut T for Serialize/Deserialize. Fix valgrind CI. * Update commons/zenoh-shm/src/lib.rs * Revert wrong change on log * Update zenoh/src/bytes.rs * Fix use * Fix use * Review fixes * fix recursive call * Update commons/zenoh-shm/src/api/provider/types.rs * unstable for shm slice traits * Add more #[zenoh_macros::unstable_doc] * SHM establishment reorg * add missing ztimeout! 
in tests --------- Co-authored-by: yuanyuyuan Co-authored-by: Mahmoud Mazouz Co-authored-by: Luca Cominardi --- .config/nextest.toml | 8 +- .github/workflows/ci.yml | 9 +- Cargo.lock | 132 ++- Cargo.toml | 4 + .../src/queryable_get/bin/z_queryable_get.rs | 9 +- commons/zenoh-buffers/src/zslice.rs | 42 +- commons/zenoh-codec/Cargo.toml | 3 +- commons/zenoh-codec/src/core/shm.rs | 134 ++- commons/zenoh-codec/src/transport/init.rs | 24 +- commons/zenoh-codec/src/transport/open.rs | 24 +- commons/zenoh-codec/src/zenoh/err.rs | 21 + commons/zenoh-codec/tests/codec.rs | 15 +- commons/zenoh-macros/src/lib.rs | 13 +- commons/zenoh-protocol/src/transport/init.rs | 7 + commons/zenoh-protocol/src/transport/open.rs | 26 +- commons/zenoh-protocol/src/zenoh/err.rs | 15 + commons/zenoh-shm/Cargo.toml | 22 +- commons/zenoh-shm/src/api/client/mod.rs | 16 + .../src/api/client/shared_memory_client.rs | 31 + .../src/api/client/shared_memory_segment.rs | 29 + .../zenoh-shm/src/api/client_storage/mod.rs | 163 ++++ commons/zenoh-shm/src/api/common/mod.rs | 15 + commons/zenoh-shm/src/api/common/types.rs | 27 + commons/zenoh-shm/src/api/mod.rs | 20 + .../src/api/protocol_implementations/mod.rs | 15 + .../api/protocol_implementations/posix/mod.rs | 19 + .../posix/posix_shared_memory_client.rs | 39 + .../posix_shared_memory_provider_backend.rs | 286 ++++++ .../posix/posix_shared_memory_segment.rs | 47 + .../posix/protocol_id.rs | 19 + commons/zenoh-shm/src/api/provider/chunk.rs | 53 + commons/zenoh-shm/src/api/provider/mod.rs | 18 + .../api/provider/shared_memory_provider.rs | 916 ++++++++++++++++++ .../shared_memory_provider_backend.rs | 52 + commons/zenoh-shm/src/api/provider/types.rs | 173 ++++ commons/zenoh-shm/src/api/slice/mod.rs | 17 + commons/zenoh-shm/src/api/slice/traits.rs | 24 + commons/zenoh-shm/src/api/slice/zsliceshm.rs | 172 ++++ .../zenoh-shm/src/api/slice/zsliceshmmut.rs | 189 ++++ .../src/header/allocated_descriptor.rs | 26 + commons/zenoh-shm/src/header/chunk_header.rs | 
28 + commons/zenoh-shm/src/header/descriptor.rs | 63 ++ commons/zenoh-shm/src/header/mod.rs | 23 + commons/zenoh-shm/src/header/segment.rs | 40 + commons/zenoh-shm/src/header/storage.rs | 87 ++ commons/zenoh-shm/src/header/subscription.rs | 61 ++ commons/zenoh-shm/src/lib.rs | 516 +++------- commons/zenoh-shm/src/posix_shm/array.rs | 124 +++ commons/zenoh-shm/src/posix_shm/mod.rs | 16 + commons/zenoh-shm/src/posix_shm/segment.rs | 127 +++ commons/zenoh-shm/src/reader.rs | 147 +++ .../src/watchdog/allocated_watchdog.rs | 35 + commons/zenoh-shm/src/watchdog/confirmator.rs | 192 ++++ commons/zenoh-shm/src/watchdog/descriptor.rs | 116 +++ commons/zenoh-shm/src/watchdog/mod.rs | 24 + .../zenoh-shm/src/watchdog/periodic_task.rs | 100 ++ commons/zenoh-shm/src/watchdog/segment.rs | 41 + commons/zenoh-shm/src/watchdog/storage.rs | 76 ++ commons/zenoh-shm/src/watchdog/validator.rs | 102 ++ commons/zenoh-shm/tests/common/mod.rs | 105 ++ commons/zenoh-shm/tests/header.rs | 130 +++ commons/zenoh-shm/tests/periodic_task.rs | 172 ++++ commons/zenoh-shm/tests/posix_array.rs | 161 +++ commons/zenoh-shm/tests/posix_segment.rs | 136 +++ commons/zenoh-shm/tests/posix_shm_provider.rs | 117 +++ commons/zenoh-shm/tests/watchdog.rs | 311 ++++++ commons/zenoh-sync/src/object_pool.rs | 8 +- examples/Cargo.toml | 28 +- examples/examples/z_alloc_shm.rs | 136 +++ examples/examples/z_ping_shm.rs | 147 +++ examples/examples/z_pong.rs | 7 +- examples/examples/z_pub_shm.rs | 103 +- examples/examples/z_pub_shm_thr.rs | 54 +- examples/examples/z_sub_shm.rs | 6 +- io/zenoh-transport/Cargo.toml | 1 + io/zenoh-transport/src/common/batch.rs | 6 +- io/zenoh-transport/src/manager.rs | 42 +- io/zenoh-transport/src/multicast/link.rs | 6 +- io/zenoh-transport/src/multicast/manager.rs | 7 - io/zenoh-transport/src/multicast/mod.rs | 2 - io/zenoh-transport/src/multicast/rx.rs | 2 +- io/zenoh-transport/src/multicast/shm.rs | 44 - io/zenoh-transport/src/multicast/transport.rs | 12 + 
io/zenoh-transport/src/multicast/tx.rs | 10 +- io/zenoh-transport/src/shm.rs | 316 +++--- .../src/unicast/establishment/accept.rs | 82 +- .../src/unicast/establishment/ext/shm.rs | 331 ++++--- .../src/unicast/establishment/open.rs | 79 +- io/zenoh-transport/src/unicast/link.rs | 6 +- .../src/unicast/lowlatency/rx.rs | 4 +- .../src/unicast/lowlatency/transport.rs | 2 +- .../src/unicast/lowlatency/tx.rs | 10 +- io/zenoh-transport/src/unicast/manager.rs | 28 +- io/zenoh-transport/src/unicast/mod.rs | 4 +- .../src/unicast/shared_memory_unicast.rs | 57 -- .../src/unicast/universal/link.rs | 2 +- .../src/unicast/universal/rx.rs | 4 +- .../src/unicast/universal/transport.rs | 2 +- .../src/unicast/universal/tx.rs | 10 +- .../tests/multicast_compression.rs | 30 +- .../tests/multicast_transport.rs | 30 +- .../tests/unicast_authenticator.rs | 25 +- .../tests/unicast_compression.rs | 18 +- .../tests/unicast_concurrent.rs | 20 +- .../tests/unicast_defragmentation.rs | 5 +- .../tests/unicast_intermittent.rs | 23 +- io/zenoh-transport/tests/unicast_multilink.rs | 26 +- io/zenoh-transport/tests/unicast_openclose.rs | 2 +- .../tests/unicast_priorities.rs | 5 +- io/zenoh-transport/tests/unicast_shm.rs | 91 +- .../tests/unicast_simultaneous.rs | 4 +- io/zenoh-transport/tests/unicast_transport.rs | 9 +- zenoh-ext/Cargo.toml | 3 + zenoh/Cargo.toml | 1 + zenoh/src/bytes.rs | 838 +++++++++++++--- zenoh/src/encoding.rs | 12 +- zenoh/src/lib.rs | 35 +- zenoh/src/net/runtime/mod.rs | 62 +- zenoh/src/publication.rs | 8 +- zenoh/src/queryable.rs | 8 +- zenoh/src/sample/builder.rs | 6 +- zenoh/src/sample/mod.rs | 18 +- zenoh/src/session.rs | 23 +- zenoh/src/value.rs | 6 +- zenoh/tests/events.rs | 18 +- zenoh/tests/payload.rs | 97 ++ zenoh/tests/routing.rs | 12 +- zenoh/tests/shm.rs | 204 ++++ 128 files changed, 7727 insertions(+), 1394 deletions(-) create mode 100644 commons/zenoh-shm/src/api/client/mod.rs create mode 100644 commons/zenoh-shm/src/api/client/shared_memory_client.rs create mode 
100644 commons/zenoh-shm/src/api/client/shared_memory_segment.rs create mode 100644 commons/zenoh-shm/src/api/client_storage/mod.rs create mode 100644 commons/zenoh-shm/src/api/common/mod.rs create mode 100644 commons/zenoh-shm/src/api/common/types.rs create mode 100644 commons/zenoh-shm/src/api/mod.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/mod.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs create mode 100644 commons/zenoh-shm/src/api/provider/chunk.rs create mode 100644 commons/zenoh-shm/src/api/provider/mod.rs create mode 100644 commons/zenoh-shm/src/api/provider/shared_memory_provider.rs create mode 100644 commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs create mode 100644 commons/zenoh-shm/src/api/provider/types.rs create mode 100644 commons/zenoh-shm/src/api/slice/mod.rs create mode 100644 commons/zenoh-shm/src/api/slice/traits.rs create mode 100644 commons/zenoh-shm/src/api/slice/zsliceshm.rs create mode 100644 commons/zenoh-shm/src/api/slice/zsliceshmmut.rs create mode 100644 commons/zenoh-shm/src/header/allocated_descriptor.rs create mode 100644 commons/zenoh-shm/src/header/chunk_header.rs create mode 100644 commons/zenoh-shm/src/header/descriptor.rs create mode 100644 commons/zenoh-shm/src/header/mod.rs create mode 100644 commons/zenoh-shm/src/header/segment.rs create mode 100644 commons/zenoh-shm/src/header/storage.rs create mode 100644 commons/zenoh-shm/src/header/subscription.rs create mode 100644 commons/zenoh-shm/src/posix_shm/array.rs create mode 
100644 commons/zenoh-shm/src/posix_shm/mod.rs create mode 100644 commons/zenoh-shm/src/posix_shm/segment.rs create mode 100644 commons/zenoh-shm/src/reader.rs create mode 100644 commons/zenoh-shm/src/watchdog/allocated_watchdog.rs create mode 100644 commons/zenoh-shm/src/watchdog/confirmator.rs create mode 100644 commons/zenoh-shm/src/watchdog/descriptor.rs create mode 100644 commons/zenoh-shm/src/watchdog/mod.rs create mode 100644 commons/zenoh-shm/src/watchdog/periodic_task.rs create mode 100644 commons/zenoh-shm/src/watchdog/segment.rs create mode 100644 commons/zenoh-shm/src/watchdog/storage.rs create mode 100644 commons/zenoh-shm/src/watchdog/validator.rs create mode 100644 commons/zenoh-shm/tests/common/mod.rs create mode 100644 commons/zenoh-shm/tests/header.rs create mode 100644 commons/zenoh-shm/tests/periodic_task.rs create mode 100644 commons/zenoh-shm/tests/posix_array.rs create mode 100644 commons/zenoh-shm/tests/posix_segment.rs create mode 100644 commons/zenoh-shm/tests/posix_shm_provider.rs create mode 100644 commons/zenoh-shm/tests/watchdog.rs create mode 100644 examples/examples/z_alloc_shm.rs create mode 100644 examples/examples/z_ping_shm.rs delete mode 100644 io/zenoh-transport/src/multicast/shm.rs delete mode 100644 io/zenoh-transport/src/unicast/shared_memory_unicast.rs create mode 100644 zenoh/tests/payload.rs create mode 100644 zenoh/tests/shm.rs diff --git a/.config/nextest.toml b/.config/nextest.toml index aa2c3ac37b..79e299f524 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -8,9 +8,15 @@ slow-timeout = { period = "60s", terminate-after = 2 } filter = """ test(=zenoh_session_unicast) | test(=zenoh_session_multicast) | +test(=zenoh_unicity_p2p) | +test(=zenoh_unicity_brokered) | test(=transport_tcp_intermittent) | test(=transport_tcp_intermittent_for_lowlatency_transport) | -test(=three_node_combination) +test(=three_node_combination) | +test(=watchdog_alloc_concurrent) | +test(=header_check_memory_concurrent) | 
+test(=header_link_concurrent) | +test(=header_link_failure_concurrent) """ threads-required = 'num-cpus' slow-timeout = { period = "60s", terminate-after = 6 } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8ac80b747c..b28ea827c0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,6 +52,9 @@ jobs: - name: Clippy unstable targets run: cargo +stable clippy --all-targets --features unstable -- --deny warnings + - name: Clippy shared memory without unstable + run: cargo +stable clippy --all-targets --features shared-memory -- --deny warnings + - name: Clippy all features if: ${{ matrix.os == 'ubuntu-latest' || matrix.os == 'macOS-latest' }} run: cargo +stable clippy --all-targets --all-features -- --deny warnings @@ -92,8 +95,12 @@ jobs: run: cargo nextest run --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Run tests with SHM + if: ${{ matrix.os == 'macOS-latest' || matrix.os == 'windows-latest' }} + run: cargo nextest run -F shared-memory -F unstable -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + + - name: Run tests with SHM + unixpipe if: ${{ matrix.os == 'ubuntu-latest' }} - run: cargo nextest run -F shared-memory -F transport_unixpipe -p zenoh-transport + run: cargo nextest run -F shared-memory -F unstable -F transport_unixpipe -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Check for feature leaks if: ${{ matrix.os == 'ubuntu-latest' }} diff --git a/Cargo.lock b/Cargo.lock index 8dd1450361..d009eb94cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1948,6 +1948,15 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lockfree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74ee94b5ad113c7cb98c5a040f783d0952ee4fe100993881d1673c2cb002dd23" +dependencies = [ + "owned-alloc", +] + [[package]] name = "log" version = 
"0.4.20" @@ -2295,9 +2304,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -2376,6 +2385,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owned-alloc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30fceb411f9a12ff9222c5f824026be368ff15dc2f13468d850c7d3f502205d6" + [[package]] name = "parking" version = "2.1.0" @@ -2691,6 +2706,15 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit", +] + [[package]] name = "proc-macro-hack" version = "0.5.20+deprecated" @@ -3234,6 +3258,12 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.15" @@ -3529,6 +3559,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2-const-stable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" + [[package]] name = "sha3" version = "0.10.8" @@ -3680,6 +3716,42 @@ dependencies = [ "der", ] +[[package]] +name = "stabby" 
+version = "4.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ec04c5825384722310b6a1fd83023bee0bfdc838f7aa3069f0a59e10203836b" +dependencies = [ + "lazy_static", + "rustversion", + "stabby-abi", +] + +[[package]] +name = "stabby-abi" +version = "4.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976322da1deb6cc64a8406fd24378b840b1962acaac1978a993131c3838d81b3" +dependencies = [ + "libc", + "rustversion", + "sha2-const-stable", + "stabby-macros", +] + +[[package]] +name = "stabby-macros" +version = "4.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736712a13ab37b1fa6e073831efca751bbcb31033af4d7308bd5d9d605939183" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "rand 0.8.5", + "syn 1.0.109", +] + [[package]] name = "standback" version = "0.2.17" @@ -3917,6 +3989,20 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "thread-priority" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72cb4958060ee2d9540cef68bb3871fd1e547037772c7fe7650d5d1cbec53b3" +dependencies = [ + "bitflags 1.3.2", + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "winapi", +] + [[package]] name = "thread_local" version = "1.1.8" @@ -4173,6 +4259,23 @@ dependencies = [ "vsock", ] +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" + +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + [[package]] name = "tower-service" version = "0.3.2" @@ -4911,6 +5014,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -5105,7 +5217,6 @@ dependencies = [ "zenoh", "zenoh-collections", "zenoh-ext", - "zenoh-shm", "zenoh-util", ] @@ -5539,11 +5650,26 @@ dependencies = [ name = "zenoh-shm" version = "0.11.0-dev" dependencies = [ + "async-trait", + "bincode", + "crc", + "lazy_static", + "libc", + "lockfree", + "num-traits", + "num_cpus", + "rand 0.8.5", "serde", "shared_memory", + "stabby", + "thread-priority", + "tokio", "tracing", "zenoh-buffers", + "zenoh-core", + "zenoh-macros", "zenoh-result", + "zenoh-shm", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7d5e230e4c..f00f625d0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,9 +112,11 @@ lazy_static = "1.4.0" libc = "0.2.139" libloading = "0.8" tracing = "0.1" +lockfree = "0.5" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } num_cpus = "1.16.0" +num-traits = { version = "0.2.17", default-features = false } ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" @@ -146,6 +148,7 @@ serde_cbor = "0.11.2" serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" +stabby = "4.0.5" sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" @@ -159,6 +162,7 @@ tokio-util = "0.7.10" tokio-tungstenite = "0.21" tokio-rustls = "0.25.0" # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) +thread-priority = "0.15" console-subscriber = "0.2" typenum = "1.16.0" uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs 
b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 676db8f7d0..2121d0ea34 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -22,22 +22,19 @@ async fn main() { let _z = zenoh_runtime::ZRuntimePoolGuard; - let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); + let queryable_key_expr = keyexpr::new("test/valgrind/data").unwrap(); let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); let _queryable = queryable_session - .declare_queryable(&queryable_key_expr.clone()) + .declare_queryable(queryable_key_expr) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); let queryable_key_expr = queryable_key_expr.clone(); zenoh_runtime::ZRuntime::Application.block_in_place(async move { query - .reply( - queryable_key_expr, - query.value().unwrap().payload().clone(), - ) + .reply(queryable_key_expr, query.value().unwrap().payload().clone()) .res() .await .unwrap(); diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 05c77cac7d..60dbdab5e1 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -28,46 +28,52 @@ use core::{ /*************************************/ /* ZSLICE BUFFER */ /*************************************/ -pub trait ZSliceBuffer: Send + Sync + fmt::Debug { +pub trait ZSliceBuffer: Any + Send + Sync + fmt::Debug { fn as_slice(&self) -> &[u8]; - fn as_mut_slice(&mut self) -> &mut [u8]; fn as_any(&self) -> &dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; } impl ZSliceBuffer for Vec { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + 
} } impl ZSliceBuffer for Box<[u8]> { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } impl ZSliceBuffer for [u8; N] { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } /*************************************/ @@ -140,6 +146,15 @@ impl ZSlice { self.buf.as_any().downcast_ref::() } + #[inline] + #[must_use] + pub fn downcast_mut(&mut self) -> Option<&mut T> + where + T: Any, + { + Arc::get_mut(&mut self.buf).and_then(|val| val.as_any_mut().downcast_mut::()) + } + #[inline] #[must_use] pub const fn range(&self) -> Range { @@ -424,8 +439,9 @@ mod tests { assert_eq!(buf.as_slice(), zslice.as_slice()); let range = zslice.range(); - let mbuf = Arc::get_mut(&mut zslice.buf).unwrap(); - mbuf.as_mut_slice()[range][..buf.len()].clone_from_slice(&buf[..]); + let mut_slice = zslice.downcast_mut::>().unwrap(); + + mut_slice[range][..buf.len()].clone_from_slice(&buf[..]); assert_eq!(buf.as_slice(), zslice.as_slice()); } diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 57fa34a4ab..209a4c698d 100644 --- a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -39,7 +39,8 @@ std = [ shared-memory = [ "std", "zenoh-shm", - "zenoh-protocol/shared-memory" + "zenoh-protocol/shared-memory", + "zenoh-buffers/shared-memory" ] [dependencies] diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 69c5c59ce0..2548e4ed14 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -16,7 +16,50 @@ use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_shm::SharedMemoryBufInfo; +use zenoh_shm::{ + 
api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, + watchdog::descriptor::Descriptor, SharedMemoryBufInfo, +}; + +impl WCodec<&Descriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Descriptor) -> Self::Output { + self.write(&mut *writer, x.id)?; + self.write(&mut *writer, x.index_and_bitpos)?; + Ok(()) + } +} + +impl WCodec<&HeaderDescriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &HeaderDescriptor) -> Self::Output { + self.write(&mut *writer, x.id)?; + self.write(&mut *writer, x.index)?; + Ok(()) + } +} + +impl WCodec<&ChunkDescriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &ChunkDescriptor) -> Self::Output { + self.write(&mut *writer, x.segment)?; + self.write(&mut *writer, x.chunk)?; + self.write(&mut *writer, x.len)?; + Ok(()) + } +} impl WCodec<&SharedMemoryBufInfo, &mut W> for Zenoh080 where @@ -26,20 +69,74 @@ where fn write(self, writer: &mut W, x: &SharedMemoryBufInfo) -> Self::Output { let SharedMemoryBufInfo { - offset, - length, - shm_manager, - kind, + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, } = x; - self.write(&mut *writer, offset)?; - self.write(&mut *writer, length)?; - self.write(&mut *writer, shm_manager.as_str())?; - self.write(&mut *writer, kind)?; + self.write(&mut *writer, data_descriptor)?; + self.write(&mut *writer, shm_protocol)?; + self.write(&mut *writer, data_len)?; + self.write(&mut *writer, watchdog_descriptor)?; + self.write(&mut *writer, header_descriptor)?; + self.write(&mut *writer, generation)?; Ok(()) } } +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let id = self.read(&mut *reader)?; + let index_and_bitpos = self.read(&mut 
*reader)?; + + Ok(Descriptor { + id, + index_and_bitpos, + }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let id = self.read(&mut *reader)?; + let index = self.read(&mut *reader)?; + + Ok(HeaderDescriptor { id, index }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let segment = self.read(&mut *reader)?; + let chunk = self.read(&mut *reader)?; + let len = self.read(&mut *reader)?; + + Ok(ChunkDescriptor { + segment, + chunk, + len, + }) + } +} + impl RCodec for Zenoh080 where R: Reader, @@ -47,12 +144,21 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let offset: usize = self.read(&mut *reader)?; - let length: usize = self.read(&mut *reader)?; - let shm_manager: String = self.read(&mut *reader)?; - let kind: u8 = self.read(&mut *reader)?; + let data_descriptor = self.read(&mut *reader)?; + let shm_protocol = self.read(&mut *reader)?; + let data_len = self.read(&mut *reader)?; + let watchdog_descriptor = self.read(&mut *reader)?; + let header_descriptor = self.read(&mut *reader)?; + let generation = self.read(&mut *reader)?; - let shm_info = SharedMemoryBufInfo::new(offset, length, shm_manager, kind); + let shm_info = SharedMemoryBufInfo::new( + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, + ); Ok(shm_info) } } diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index d3a92165ea..fec9f07afd 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -44,6 +44,7 @@ where resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -57,11 +58,16 @@ where header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + 
(ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -91,6 +97,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -165,6 +172,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -181,6 +189,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -219,6 +228,7 @@ where resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -244,6 +254,7 @@ where batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -257,11 +268,16 @@ where header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -294,6 +310,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -371,6 +388,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -387,6 +405,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut 
*reader)?; ext_shm = Some(s); @@ -426,6 +445,7 @@ where batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index f895942ea1..d539526715 100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -40,6 +40,7 @@ where initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -53,11 +54,16 @@ where header |= flag::T; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -77,6 +83,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -138,6 +145,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -154,6 +162,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -190,6 +199,7 @@ where initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -211,6 +221,7 @@ where lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -226,11 +237,16 @@ where header |= flag::T; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + 
{ + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -249,6 +265,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -309,6 +326,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -325,6 +343,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -360,6 +379,7 @@ where lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index b459f67b3f..5291645bf0 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -37,6 +37,8 @@ where let Err { encoding, ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, payload, } = x; @@ -47,6 +49,10 @@ where header |= flag::E; } let mut n_exts = (ext_sinfo.is_some() as u8) + (ext_unknown.len() as u8); + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } if n_exts != 0 { header |= flag::Z; } @@ -62,6 +68,11 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] + if let Some(eshm) = ext_shm.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (eshm, n_exts != 0))?; + } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; @@ -107,6 +118,8 @@ where // Extensions let mut ext_sinfo: Option = None; + #[cfg(feature = "shared-memory")] + let mut ext_shm: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -119,6 +132,12 @@ where ext_sinfo = Some(s); has_ext = ext; } + 
#[cfg(feature = "shared-memory")] + ext::Shm::ID => { + let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; + ext_shm = Some(s); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Err", ext)?; ext_unknown.push(u); @@ -134,6 +153,8 @@ where Ok(Err { encoding, ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index e6f6500843..e9b8140f21 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -360,15 +360,24 @@ fn codec_encoding() { #[cfg(feature = "shared-memory")] #[test] fn codec_shm_info() { - use zenoh_shm::SharedMemoryBufInfo; + use zenoh_shm::api::provider::chunk::ChunkDescriptor; + use zenoh_shm::header::descriptor::HeaderDescriptor; + use zenoh_shm::{watchdog::descriptor::Descriptor, SharedMemoryBufInfo}; run!(SharedMemoryBufInfo, { let mut rng = rand::thread_rng(); - let len = rng.gen_range(0..16); SharedMemoryBufInfo::new( + ChunkDescriptor::new(rng.gen(), rng.gen(), rng.gen()), rng.gen(), rng.gen(), - Alphanumeric.sample_string(&mut rng, len), + Descriptor { + id: rng.gen(), + index_and_bitpos: rng.gen(), + }, + HeaderDescriptor { + id: rng.gen(), + index: rng.gen(), + }, rng.gen(), ) }); diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index b77dffeba0..71184d4245 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -60,10 +60,9 @@ pub fn rustc_version_release(_tokens: TokenStream) -> TokenStream { } #[proc_macro_attribute] -pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { +pub fn unstable_doc(_attr: TokenStream, item: TokenStream) -> TokenStream { let item = proc_macro2::TokenStream::from(item); TokenStream::from(quote! { - #[cfg(feature = "unstable")] ///

/// 🔬 /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. @@ -74,6 +73,16 @@ pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { }) } +#[proc_macro_attribute] +pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { + let item = proc_macro2::TokenStream::from(item); + TokenStream::from(quote! { + #[cfg(feature = "unstable")] + #[zenoh_macros::unstable_doc] + #item + }) +} + fn keformat_support(source: &str) -> proc_macro2::TokenStream { let format = match KeFormat::new(&source) { Ok(format) => format, diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index de517a353c..7e86d17af2 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -114,6 +114,7 @@ pub struct InitSyn { pub resolution: Resolution, pub batch_size: BatchSize, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -134,6 +135,7 @@ pub mod ext { /// # Shm extension /// Used as challenge for probing shared memory capabilities + #[cfg(feature = "shared-memory")] pub type Shm = zextzbuf!(0x2, false); /// # Auth extension @@ -167,6 +169,7 @@ impl InitSyn { let resolution = Resolution::rand(); let batch_size: BatchSize = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -180,6 +183,7 @@ impl InitSyn { resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -198,6 +202,7 @@ pub struct InitAck { pub batch_size: BatchSize, pub cookie: ZSlice, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub 
ext_mlink: Option, @@ -224,6 +229,7 @@ impl InitAck { let batch_size: BatchSize = rng.gen(); let cookie = ZSlice::rand(64); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -238,6 +244,7 @@ impl InitAck { batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index d793671b06..c643286193 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -78,6 +78,7 @@ pub struct OpenSyn { pub initial_sn: TransportSn, pub cookie: ZSlice, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -88,16 +89,22 @@ pub struct OpenSyn { // Extensions pub mod ext { use crate::{ - common::{ZExtUnit, ZExtZ64, ZExtZBuf}, - zextunit, zextz64, zextzbuf, + common::{ZExtUnit, ZExtZBuf}, + zextunit, zextzbuf, }; + #[cfg(feature = "shared-memory")] + use crate::common::ZExtZ64; + #[cfg(feature = "shared-memory")] + use crate::zextz64; + /// # QoS extension /// Used to negotiate the use of QoS pub type QoS = zextunit!(0x1, false); /// # Shm extension /// Used as challenge for probing shared memory capabilities + #[cfg(feature = "shared-memory")] pub type Shm = zextz64!(0x2, false); /// # Auth extension @@ -121,9 +128,12 @@ pub mod ext { impl OpenSyn { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; + use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + #[cfg(feature = "shared-memory")] + use crate::common::ZExtZ64; + const MIN: usize = 32; const MAX: usize = 1_024; @@ -138,6 +148,7 @@ impl OpenSyn { let initial_sn: TransportSn = rng.gen(); 
let cookie = ZSlice::rand(rng.gen_range(MIN..=MAX)); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZ64::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -149,6 +160,7 @@ impl OpenSyn { initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -163,6 +175,7 @@ pub struct OpenAck { pub lease: Duration, pub initial_sn: TransportSn, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -173,9 +186,12 @@ pub struct OpenAck { impl OpenAck { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; + use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + #[cfg(feature = "shared-memory")] + use crate::common::ZExtZ64; + let mut rng = rand::thread_rng(); let lease = if rng.gen_bool(0.5) { @@ -186,6 +202,7 @@ impl OpenAck { let initial_sn: TransportSn = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZ64::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); @@ -196,6 +213,7 @@ impl OpenAck { lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index eacbb26596..b6aa5f4954 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -44,17 +44,28 @@ pub mod flag { pub struct Err { pub encoding: Encoding, pub ext_sinfo: Option, + #[cfg(feature = "shared-memory")] + pub ext_shm: Option, pub ext_unknown: Vec, pub payload: ZBuf, } pub mod ext { + #[cfg(feature = "shared-memory")] + use 
crate::{common::ZExtUnit, zextunit}; use crate::{common::ZExtZBuf, zextzbuf}; /// # SourceInfo extension /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; + + /// # Shared Memory extension + /// Used to carry additional information about the shared-memory layour of data + #[cfg(feature = "shared-memory")] + pub type Shm = zextunit!(0x2, true); + #[cfg(feature = "shared-memory")] + pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; } impl Err { @@ -66,6 +77,8 @@ impl Err { let encoding = Encoding::rand(); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + #[cfg(feature = "shared-memory")] + let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( @@ -78,6 +91,8 @@ impl Err { Self { encoding, ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, payload, } diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index e6107b9a13..60b9acde1d 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -28,9 +28,29 @@ categories = { workspace = true } description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +test = ["num_cpus"] + [dependencies] +async-trait = { workspace = true } +bincode = { workspace = true } +crc = { workspace = true } tracing = {workspace = true} serde = { workspace = true, features = ["default"] } shared_memory = { workspace = true } -zenoh-buffers = { workspace = true } +tokio = { workspace = true } zenoh-result = { workspace = true } +zenoh-core = { workspace = true } +zenoh-macros = { workspace = true } +zenoh-buffers = { workspace = true } +rand = { workspace = true } +lazy_static = { workspace = true } +num-traits = { workspace = true } +num_cpus = { workspace = true, optional = true } +thread-priority = { workspace = true } +lockfree = { workspace = true } +stabby = { workspace = true } + +[dev-dependencies] +zenoh-shm = { workspace = true, features = ["test"] } +libc = { workspace = true } diff --git a/commons/zenoh-shm/src/api/client/mod.rs b/commons/zenoh-shm/src/api/client/mod.rs new file mode 100644 index 0000000000..eab20733e7 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/mod.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod shared_memory_client; +pub mod shared_memory_segment; diff --git a/commons/zenoh-shm/src/api/client/shared_memory_client.rs b/commons/zenoh-shm/src/api/client/shared_memory_client.rs new file mode 100644 index 0000000000..abc7221300 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/shared_memory_client.rs @@ -0,0 +1,31 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::fmt::Debug; + +use std::sync::Arc; + +use zenoh_result::ZResult; + +use crate::api::common::types::SegmentID; + +use super::shared_memory_segment::SharedMemorySegment; + +/// SharedMemoryClient - client factory implementation for particular shared memory protocol +#[zenoh_macros::unstable_doc] +pub trait SharedMemoryClient: Debug + Send + Sync { + /// Attach to particular shared memory segment + #[zenoh_macros::unstable_doc] + fn attach(&self, segment: SegmentID) -> ZResult>; +} diff --git a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs new file mode 100644 index 0000000000..88eaf8761f --- /dev/null +++ b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs @@ -0,0 +1,29 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::fmt::Debug; + +use std::sync::atomic::AtomicPtr; + +use zenoh_result::ZResult; + +use crate::api::common::types::ChunkID; + +/// SharedMemorySegment - RAII interface to interact with particular shared memory segment +#[zenoh_macros::unstable_doc] +pub trait SharedMemorySegment: Debug + Send + Sync { + /// Obtain the actual region of memory identified by it's id + #[zenoh_macros::unstable_doc] + fn map(&self, chunk: ChunkID) -> ZResult>; +} diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs new file mode 100644 index 0000000000..0ce1a8af11 --- /dev/null +++ b/commons/zenoh-shm/src/api/client_storage/mod.rs @@ -0,0 +1,163 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use lazy_static::lazy_static; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use zenoh_result::{bail, ZResult}; + +use crate::api::{ + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + common::types::ProtocolID, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, + }, +}; + +use crate::reader::{ClientStorage, GlobalDataSegmentID}; + +lazy_static! { + /// A global lazily-initialized SHM client storage. 
+ /// When initialized, contains default client set, + /// see SharedMemoryClientStorage::with_default_client_set + #[zenoh_macros::unstable_doc] + pub static ref GLOBAL_CLIENT_STORAGE: Arc = Arc::new( + SharedMemoryClientStorage::builder() + .with_default_client_set() + .build() + ); +} + +/// Builder to create new client storages +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryClientSetBuilder; + +impl SharedMemoryClientSetBuilder { + /// Add client to the storage (without including the default client set) + #[zenoh_macros::unstable_doc] + pub fn with_client( + self, + id: ProtocolID, + client: Arc, + ) -> SharedMemoryClientStorageBuilder { + let clients = HashMap::from([(id, client)]); + SharedMemoryClientStorageBuilder::new(clients) + } + + /// Add list of clients to the storage (without including the default client set) + #[zenoh_macros::unstable_doc] + pub fn with_clients( + self, + clients: &[(ProtocolID, Arc)], + ) -> SharedMemoryClientStorageBuilder { + let clients = clients.iter().cloned().collect(); + SharedMemoryClientStorageBuilder::new(clients) + } + + /// Include default clients + #[zenoh_macros::unstable_doc] + pub fn with_default_client_set(self) -> SharedMemoryClientStorageBuilder { + let clients = HashMap::from([( + POSIX_PROTOCOL_ID, + Arc::new(PosixSharedMemoryClient {}) as Arc, + )]); + SharedMemoryClientStorageBuilder::new(clients) + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryClientStorageBuilder { + clients: HashMap>, +} + +impl SharedMemoryClientStorageBuilder { + fn new(clients: HashMap>) -> Self { + Self { clients } + } + + /// Add client to the storage + #[zenoh_macros::unstable_doc] + pub fn with_client( + mut self, + id: ProtocolID, + client: Arc, + ) -> ZResult { + match self.clients.entry(id) { + std::collections::hash_map::Entry::Occupied(occupied) => { + bail!("Client already exists for id {id}: {:?}!", occupied) + } + std::collections::hash_map::Entry::Vacant(vacant) => { + vacant.insert(client as Arc); + 
Ok(self) + } + } + } + + /// Add list of clients to the storage + #[zenoh_macros::unstable_doc] + pub fn with_clients(mut self, clients: &[(ProtocolID, Arc)]) -> Self { + self.clients.extend(clients.iter().cloned()); + self + } + + /// Build the storage with parameters specified on previous step + #[zenoh_macros::unstable_doc] + pub fn build(self) -> SharedMemoryClientStorage { + SharedMemoryClientStorage::new(self.clients) + } +} + +/// A storage for SHM clients. +/// Runtime or Session constructed with instance of this type gets capabilities to read +/// SHM buffers for Protocols added to this instance. +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct SharedMemoryClientStorage { + pub(crate) clients: ClientStorage>, + pub(crate) segments: RwLock>>, +} + +impl Eq for SharedMemoryClientStorage {} + +impl PartialEq for SharedMemoryClientStorage { + fn eq(&self, other: &Self) -> bool { + std::ptr::eq(self, other) + } +} + +impl SharedMemoryClientStorage { + /// Get the builder to construct a new storage + #[zenoh_macros::unstable_doc] + pub fn builder() -> SharedMemoryClientSetBuilder { + SharedMemoryClientSetBuilder + } + + /// Get the list of supported SHM protocols. 
+ #[zenoh_macros::unstable_doc] + pub fn supported_protocols(&self) -> Vec { + self.clients.get_clients().keys().copied().collect() + } + + fn new(clients: HashMap>) -> Self { + Self { + clients: ClientStorage::new(clients), + segments: RwLock::default(), + } + } +} diff --git a/commons/zenoh-shm/src/api/common/mod.rs b/commons/zenoh-shm/src/api/common/mod.rs new file mode 100644 index 0000000000..222c7286bf --- /dev/null +++ b/commons/zenoh-shm/src/api/common/mod.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod types; diff --git a/commons/zenoh-shm/src/api/common/types.rs b/commons/zenoh-shm/src/api/common/types.rs new file mode 100644 index 0000000000..02e009aff3 --- /dev/null +++ b/commons/zenoh-shm/src/api/common/types.rs @@ -0,0 +1,27 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +/// Unique protocol identifier. 
+/// Here is a contract: it is up to user to make sure that incompatible SharedMemoryClient +/// and SharedMemoryProviderBackend implementations will never use the same ProtocolID +#[zenoh_macros::unstable_doc] +pub type ProtocolID = u32; + +/// Unique segment identifier +#[zenoh_macros::unstable_doc] +pub type SegmentID = u32; + +/// Chunk id within it's segment +#[zenoh_macros::unstable_doc] +pub type ChunkID = u32; diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs new file mode 100644 index 0000000000..08a5678fa8 --- /dev/null +++ b/commons/zenoh-shm/src/api/mod.rs @@ -0,0 +1,20 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod client; +pub mod client_storage; +pub mod common; +pub mod protocol_implementations; +pub mod provider; +pub mod slice; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/mod.rs new file mode 100644 index 0000000000..df92f63536 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/mod.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod posix; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs new file mode 100644 index 0000000000..12c8aba0b6 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs @@ -0,0 +1,19 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod posix_shared_memory_client; +pub mod posix_shared_memory_provider_backend; +pub mod protocol_id; + +pub(crate) mod posix_shared_memory_segment; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs new file mode 100644 index 0000000000..0184f50036 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs @@ -0,0 +1,39 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use zenoh_result::ZResult; + +use crate::api::{ + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + common::types::SegmentID, +}; + +use super::posix_shared_memory_segment::PosixSharedMemorySegment; + +/// Client factory implementation for particular shared memory protocol +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct PosixSharedMemoryClient; + +impl SharedMemoryClient for PosixSharedMemoryClient { + /// Attach to particular shared memory segment + #[zenoh_macros::unstable_doc] + fn attach(&self, segment: SegmentID) -> ZResult> { + Ok(Arc::new(PosixSharedMemorySegment::open(segment)?)) + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs new file mode 100644 index 0000000000..89c1b91387 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs @@ -0,0 +1,286 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + borrow::Borrow, + cmp, + collections::BinaryHeap, + sync::{ + atomic::{AtomicPtr, AtomicUsize, Ordering}, + Mutex, + }, +}; + +use zenoh_core::zlock; +use zenoh_result::ZResult; + +use crate::api::{ + common::types::ChunkID, + provider::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, ChunkAllocResult, MemoryLayout, ZAllocError}, + }, +}; + +use super::posix_shared_memory_segment::PosixSharedMemorySegment; + +// TODO: MIN_FREE_CHUNK_SIZE limitation is made to reduce memory fragmentation and lower +// the CPU time needed to defragment() - that's reasonable, and there is additional thing here: +// our SHM\zerocopy functionality outperforms common buffer transmission only starting from 1K +// buffer size. In other words, there should be some minimal size threshold reasonable to use with +// SHM - and it would be good to synchronize this threshold with MIN_FREE_CHUNK_SIZE limitation! 
+const MIN_FREE_CHUNK_SIZE: usize = 1_024; + +#[derive(Eq, Copy, Clone, Debug)] +struct Chunk { + offset: ChunkID, + size: usize, +} + +impl Ord for Chunk { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.size.cmp(&other.size) + } +} + +impl PartialOrd for Chunk { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Chunk { + fn eq(&self, other: &Self) -> bool { + self.size == other.size + } +} + +/// Builder to create posix SHM provider +#[zenoh_macros::unstable_doc] +pub struct PosixSharedMemoryProviderBackendBuilder; + +impl PosixSharedMemoryProviderBackendBuilder { + /// Use existing layout + #[zenoh_macros::unstable_doc] + pub fn with_layout>( + self, + layout: Layout, + ) -> LayoutedPosixSharedMemoryProviderBackendBuilder { + LayoutedPosixSharedMemoryProviderBackendBuilder { layout } + } + + /// Construct layout in-place using arguments + #[zenoh_macros::unstable_doc] + pub fn with_layout_args( + self, + size: usize, + alignment: AllocAlignment, + ) -> ZResult> { + let layout = MemoryLayout::new(size, alignment)?; + Ok(LayoutedPosixSharedMemoryProviderBackendBuilder { layout }) + } + + /// Construct layout in-place from size (default alignment will be used) + #[zenoh_macros::unstable_doc] + pub fn with_size( + self, + size: usize, + ) -> ZResult> { + let layout = MemoryLayout::new(size, AllocAlignment::default())?; + Ok(LayoutedPosixSharedMemoryProviderBackendBuilder { layout }) + } +} + +#[zenoh_macros::unstable_doc] +pub struct LayoutedPosixSharedMemoryProviderBackendBuilder> { + layout: Layout, +} + +impl> LayoutedPosixSharedMemoryProviderBackendBuilder { + /// try to create PosixSharedMemoryProviderBackend + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ZResult { + PosixSharedMemoryProviderBackend::new(self.layout.borrow()) + } +} + +/// A backend for SharedMemoryProvider based on POSIX shared memory. +/// This is the default general-purpose backed shipped with Zenoh. 
+#[zenoh_macros::unstable_doc] +pub struct PosixSharedMemoryProviderBackend { + available: AtomicUsize, + segment: PosixSharedMemorySegment, + free_list: Mutex>, + alignment: AllocAlignment, +} + +impl PosixSharedMemoryProviderBackend { + /// Get the builder to construct a new instance + #[zenoh_macros::unstable_doc] + pub fn builder() -> PosixSharedMemoryProviderBackendBuilder { + PosixSharedMemoryProviderBackendBuilder + } + + fn new(layout: &MemoryLayout) -> ZResult { + let segment = PosixSharedMemorySegment::create(layout.size())?; + + let mut free_list = BinaryHeap::new(); + let root_chunk = Chunk { + offset: 0, + size: layout.size(), + }; + free_list.push(root_chunk); + + tracing::trace!( + "Created PosixSharedMemoryProviderBackend id {}, layout {:?}", + segment.segment.id(), + layout + ); + + Ok(Self { + available: AtomicUsize::new(layout.size()), + segment, + free_list: Mutex::new(free_list), + alignment: layout.alignment(), + }) + } +} + +impl SharedMemoryProviderBackend for PosixSharedMemoryProviderBackend { + fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult { + tracing::trace!("PosixSharedMemoryProviderBackend::alloc({:?})", layout); + + let required_len = layout.size(); + + if self.available.load(Ordering::Relaxed) < required_len { + tracing::trace!( "PosixSharedMemoryProviderBackend does not have sufficient free memory to allocate {:?}, try de-fragmenting!", layout); + return Err(ZAllocError::OutOfMemory); + } + + let mut guard = zlock!(self.free_list); + // The strategy taken is the same for some Unix System V implementations -- as described in the + // famous Bach's book -- in essence keep an ordered list of free slot and always look for the + // biggest as that will give the biggest left-over. 
+ match guard.pop() { + Some(mut chunk) if chunk.size >= required_len => { + // NOTE: don't loose any chunks here, as it will lead to memory leak + tracing::trace!("Allocator selected Chunk ({:?})", &chunk); + if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { + let free_chunk = Chunk { + offset: chunk.offset + required_len as ChunkID, + size: chunk.size - required_len, + }; + tracing::trace!("The allocation will leave a Free Chunk: {:?}", &free_chunk); + guard.push(free_chunk); + chunk.size = required_len; + } + self.available.fetch_sub(chunk.size, Ordering::Relaxed); + + let descriptor = + ChunkDescriptor::new(self.segment.segment.id(), chunk.offset, chunk.size); + + Ok(AllocatedChunk { + descriptor, + data: unsafe { AtomicPtr::new(self.segment.segment.elem_mut(chunk.offset)) }, + }) + } + Some(c) => { + tracing::trace!("PosixSharedMemoryProviderBackend::alloc({:?}) cannot find any big enough chunk\nSharedMemoryManager::free_list = {:?}", layout, self.free_list); + guard.push(c); + Err(ZAllocError::NeedDefragment) + } + None => { + // NOTE: that should never happen! If this happens - there is a critical bug somewhere around! 
+ let err = format!("PosixSharedMemoryProviderBackend::alloc({:?}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", layout, self.free_list); + #[cfg(feature = "test")] + panic!("{err}"); + #[cfg(not(feature = "test"))] + { + tracing::error!("{err}"); + Err(ZAllocError::OutOfMemory) + } + } + } + } + + fn free(&self, chunk: &ChunkDescriptor) { + let free_chunk = Chunk { + offset: chunk.chunk, + size: chunk.len, + }; + self.available.fetch_add(free_chunk.size, Ordering::Relaxed); + zlock!(self.free_list).push(free_chunk); + } + + fn defragment(&self) -> usize { + fn try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option { + let end_offset = a.offset as usize + a.size; + if end_offset == b.offset as usize { + Some(Chunk { + size: a.size + b.size, + offset: a.offset, + }) + } else { + None + } + } + + let mut largest = 0usize; + + // TODO: optimize this! + // this is an old legacy algo for merging adjacent chunks + // we extract chunks to separate container, sort them by offset and then check each chunk for + // adjacence with neighbour. Adjacent chunks are joined and returned back to temporary container. 
+ // If chunk is not adjacent with it's neighbour, it is placed back to self.free_list + let mut guard = zlock!(self.free_list); + if guard.len() > 1 { + let mut fbs: Vec = guard.drain().collect(); + fbs.sort_by(|x, y| x.offset.cmp(&y.offset)); + let mut current = fbs.remove(0); + let mut i = 0; + let n = fbs.len(); + for chunk in fbs.iter() { + i += 1; + let next = *chunk; + match try_merge_adjacent_chunks(¤t, &next) { + Some(c) => { + current = c; + largest = largest.max(current.size); + if i == n { + guard.push(current) + } + } + None => { + guard.push(current); + if i == n { + guard.push(next); + } else { + current = next; + } + } + } + } + } + largest + } + + fn available(&self) -> usize { + self.available.load(Ordering::Relaxed) + } + + fn layout_for(&self, layout: MemoryLayout) -> ZResult { + layout.extend(self.alignment) + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs new file mode 100644 index 0000000000..eb49d141ca --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs @@ -0,0 +1,47 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicPtr; + +use zenoh_result::ZResult; + +use crate::api::common::types::SegmentID; +use crate::api::{client::shared_memory_segment::SharedMemorySegment, common::types::ChunkID}; + +use crate::posix_shm::array::ArrayInSHM; + +const POSIX_SHM_SEGMENT_PREFIX: &str = "posix_shm_provider_segment"; + +#[derive(Debug)] +pub(crate) struct PosixSharedMemorySegment { + pub(crate) segment: ArrayInSHM, +} + +impl PosixSharedMemorySegment { + pub(crate) fn create(alloc_size: usize) -> ZResult { + let segment = ArrayInSHM::create(alloc_size, POSIX_SHM_SEGMENT_PREFIX)?; + Ok(Self { segment }) + } + + pub(crate) fn open(id: SegmentID) -> ZResult { + let segment = ArrayInSHM::open(id, POSIX_SHM_SEGMENT_PREFIX)?; + Ok(Self { segment }) + } +} + +impl SharedMemorySegment for PosixSharedMemorySegment { + fn map(&self, chunk: ChunkID) -> ZResult> { + unsafe { Ok(AtomicPtr::new(self.segment.elem_mut(chunk))) } + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs new file mode 100644 index 0000000000..b2eec8d7a5 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs @@ -0,0 +1,19 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use crate::api::common::types::ProtocolID; + +/// Protocol identifier to use when creating SharedMemoryProvider +#[zenoh_macros::unstable_doc] +pub const POSIX_PROTOCOL_ID: ProtocolID = 0; diff --git a/commons/zenoh-shm/src/api/provider/chunk.rs b/commons/zenoh-shm/src/api/provider/chunk.rs new file mode 100644 index 0000000000..939758a345 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/chunk.rs @@ -0,0 +1,53 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicPtr; + +use crate::api::common::types::{ChunkID, SegmentID}; + +/// Uniquely identifies the particular chunk within particular segment +#[zenoh_macros::unstable_doc] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ChunkDescriptor { + pub segment: SegmentID, + pub chunk: ChunkID, + pub len: usize, +} + +impl ChunkDescriptor { + /// Create a new Chunk Descriptor + #[zenoh_macros::unstable_doc] + pub fn new(segment: SegmentID, chunk: ChunkID, len: usize) -> Self { + Self { + segment, + chunk, + len, + } + } +} + +/// A recently-allocated chunk. 
+#[zenoh_macros::unstable_doc] +pub struct AllocatedChunk { + pub descriptor: ChunkDescriptor, + pub data: AtomicPtr, +} + +impl AllocatedChunk { + /// Create a new Allocated Chunk + #[zenoh_macros::unstable_doc] + pub fn new(descriptor: ChunkDescriptor, data: AtomicPtr) -> Self { + Self { descriptor, data } + } +} diff --git a/commons/zenoh-shm/src/api/provider/mod.rs b/commons/zenoh-shm/src/api/provider/mod.rs new file mode 100644 index 0000000000..a769baacb3 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/mod.rs @@ -0,0 +1,18 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod chunk; +pub mod shared_memory_provider; +pub mod shared_memory_provider_backend; +pub mod types; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs new file mode 100644 index 0000000000..c3b8128300 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -0,0 +1,916 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + collections::VecDeque, + marker::PhantomData, + sync::{atomic::Ordering, Arc, Mutex}, + time::Duration, +}; + +use async_trait::async_trait; +use zenoh_result::ZResult; + +use crate::{ + api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, + header::{ + allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, + storage::GLOBAL_HEADER_STORAGE, + }, + watchdog::{ + allocated_watchdog::AllocatedWatchdog, + confirmator::{ConfirmedDescriptor, GLOBAL_CONFIRMATOR}, + descriptor::Descriptor, + storage::GLOBAL_STORAGE, + validator::GLOBAL_VALIDATOR, + }, + SharedMemoryBuf, SharedMemoryBufInfo, +}; + +use super::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, +}; + +#[derive(Debug)] +struct BusyChunk { + descriptor: ChunkDescriptor, + header: AllocatedHeaderDescriptor, + _watchdog: AllocatedWatchdog, +} + +impl BusyChunk { + fn new( + descriptor: ChunkDescriptor, + header: AllocatedHeaderDescriptor, + watchdog: AllocatedWatchdog, + ) -> Self { + Self { + descriptor, + header, + _watchdog: watchdog, + } + } +} + +/// Builder to create AllocLayout +#[zenoh_macros::unstable_doc] +pub struct AllocLayoutBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + provider: &'a SharedMemoryProvider, +} +impl<'a, IDSource, Backend> AllocLayoutBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set size for layout + #[zenoh_macros::unstable_doc] + pub fn size(self, size: usize) -> AllocLayoutSizedBuilder<'a, IDSource, Backend> { + AllocLayoutSizedBuilder { + provider: self.provider, + size, + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct 
AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + provider: &'a SharedMemoryProvider, + size: usize, +} +impl<'a, IDSource, Backend> AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set alignment for layout + #[zenoh_macros::unstable_doc] + pub fn alignment( + self, + alignment: AllocAlignment, + ) -> AllocLayoutAlignedBuilder<'a, IDSource, Backend> { + AllocLayoutAlignedBuilder { + provider: self.provider, + size: self.size, + alignment, + } + } + + /// try to build an allocation layout + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ZResult> { + AllocLayout::new(self.size, AllocAlignment::default(), self.provider) + } +} + +#[zenoh_macros::unstable_doc] +pub struct AllocLayoutAlignedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + provider: &'a SharedMemoryProvider, + size: usize, + alignment: AllocAlignment, +} +impl<'a, IDSource, Backend> AllocLayoutAlignedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Try to build layout with specified args + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ZResult> { + AllocLayout::new(self.size, self.alignment, self.provider) + } +} + +/// A layout for allocations. 
+/// This is a pre-calculated layout suitable for making series of similar allocations +/// adopted for particular SharedMemoryProvider +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct AllocLayout<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + size: usize, + provider_layout: MemoryLayout, + provider: &'a SharedMemoryProvider, +} + +impl<'a, IDSource, Backend> AllocLayout<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Allocate the new buffer with this layout + #[zenoh_macros::unstable_doc] + pub fn alloc(&'a self) -> AllocBuilder<'a, IDSource, Backend> { + AllocBuilder { + layout: self, + _phantom: PhantomData, + } + } + + fn new( + size: usize, + alignment: AllocAlignment, + provider: &'a SharedMemoryProvider, + ) -> ZResult { + // NOTE: Depending on internal implementation, provider's backend might relayout + // the allocations for bigger alignment (ex. 4-byte aligned allocation to 8-bytes aligned) + + // Create layout for specified arguments + let layout = MemoryLayout::new(size, alignment)?; + + // Obtain provider's layout for our layout + let provider_layout = provider.backend.layout_for(layout)?; + + Ok(Self { + size, + provider_layout, + provider, + }) + } +} + +/// Trait for deallocation policies. 
+#[zenoh_macros::unstable_doc] +pub trait ForceDeallocPolicy { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool; +} + +/// Try to dealloc optimal (currently eldest+1) chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocOptimal; +impl ForceDeallocPolicy for DeallocOptimal { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool { + let mut guard = provider.busy_list.lock().unwrap(); + let chunk_to_dealloc = match guard.remove(1) { + Some(val) => val, + None => match guard.pop_front() { + Some(val) => val, + None => return false, + }, + }; + drop(guard); + + provider.backend.free(&chunk_to_dealloc.descriptor); + true + } +} + +/// Try to dealloc youngest chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocYoungest; +impl ForceDeallocPolicy for DeallocYoungest { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool { + match provider.busy_list.lock().unwrap().pop_back() { + Some(val) => { + provider.backend.free(&val.descriptor); + true + } + None => false, + } + } +} + +/// Try to dealloc eldest chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocEldest; +impl ForceDeallocPolicy for DeallocEldest { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool { + match provider.busy_list.lock().unwrap().pop_front() { + Some(val) => { + provider.backend.free(&val.descriptor); + true + } + None => false, + } + } +} + +/// Trait for allocation policies +#[zenoh_macros::unstable_doc] +pub trait AllocPolicy { + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult; +} + +/// Trait for async allocation policies +#[zenoh_macros::unstable_doc] +#[async_trait] +pub trait AsyncAllocPolicy { + async fn alloc_async< + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + >( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult; +} + +/// Just try to allocate +#[zenoh_macros::unstable_doc] +pub struct JustAlloc; +impl AllocPolicy for 
JustAlloc { + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + provider.backend.alloc(layout) + } +} + +/// Garbage collection policy. +/// Try to reclaim old buffers if allocation failed and allocate again +/// if the largest reclaimed chuk is not smaller than the one required +#[zenoh_macros::unstable_doc] +pub struct GarbageCollect +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, +} +impl AllocPolicy for GarbageCollect +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + let result = InnerPolicy::alloc(layout, provider); + if let Err(ZAllocError::OutOfMemory) = result { + // try to alloc again only if GC managed to reclaim big enough chunk + if provider.garbage_collect() >= layout.size() { + return AltPolicy::alloc(layout, provider); + } + } + result + } +} + +/// Defragmenting policy. +/// Try to defragment if allocation failed and allocate again +/// if the largest defragmented chuk is not smaller than the one required +#[zenoh_macros::unstable_doc] +pub struct Defragment +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, +} +impl AllocPolicy for Defragment +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + let result = InnerPolicy::alloc(layout, provider); + if let Err(ZAllocError::NeedDefragment) = result { + // try to alloc again only if big enough chunk was defragmented + if provider.defragment() >= layout.size() { + return AltPolicy::alloc(layout, provider); + } + } + result + } +} + +/// Deallocating policy. +/// Forcely deallocate up to N buffers until allocation succeeds. 
+#[zenoh_macros::unstable_doc] +pub struct Deallocate< + const N: usize, + InnerPolicy = JustAlloc, + AltPolicy = InnerPolicy, + DeallocatePolicy = DeallocOptimal, +> where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, + DeallocatePolicy: ForceDeallocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, + _phantom3: PhantomData, +} +impl AllocPolicy + for Deallocate +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, + DeallocatePolicy: ForceDeallocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + let mut result = InnerPolicy::alloc(layout, provider); + for _ in 0..N { + match result { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + if !DeallocatePolicy::dealloc(provider) { + return result; + } + } + _ => { + return result; + } + } + result = AltPolicy::alloc(layout, provider); + } + result + } +} + +/// Blocking allocation policy. +/// This policy will block until the allocation succeeds. +/// Both sync and async modes available. +#[zenoh_macros::unstable_doc] +pub struct BlockOn +where + InnerPolicy: AllocPolicy, +{ + _phantom: PhantomData, +} +#[async_trait] +impl AsyncAllocPolicy for BlockOn +where + InnerPolicy: AllocPolicy, +{ + async fn alloc_async< + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + >( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + loop { + match InnerPolicy::alloc(layout, provider) { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + // TODO: implement provider's async signalling instead of this! 
+ tokio::time::sleep(Duration::from_millis(1)).await; + } + other_result => { + return other_result; + } + } + } + } +} +impl AllocPolicy for BlockOn +where + InnerPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + loop { + match InnerPolicy::alloc(layout, provider) { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + // TODO: implement provider's async signalling instead of this! + std::thread::sleep(Duration::from_millis(1)); + } + other_result => { + return other_result; + } + } + } + } +} + +// TODO: allocator API +/*pub struct ShmAllocator< + 'a, + Policy: AllocPolicy, + IDSource, + Backend: SharedMemoryProviderBackend, +> { + provider: &'a SharedMemoryProvider, + allocations: lockfree::map::Map, SharedMemoryBuf>, + _phantom: PhantomData, +} + +impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> + ShmAllocator<'a, Policy, IDSource, Backend> +{ + fn allocate(&self, layout: std::alloc::Layout) -> BufAllocResult { + self.provider + .alloc_layout() + .size(layout.size()) + .alignment(AllocAlignment::new(layout.align() as u32)) + .res()? 
+ .alloc() + .res() + } +} + +unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> + allocator_api2::alloc::Allocator for ShmAllocator<'a, Policy, IDSource, Backend> +{ + fn allocate( + &self, + layout: std::alloc::Layout, + ) -> Result, allocator_api2::alloc::AllocError> { + let allocation = self + .allocate(layout) + .map_err(|_| allocator_api2::alloc::AllocError)?; + + let inner = allocation.buf.load(Ordering::Relaxed); + let ptr = NonNull::new(inner).ok_or(allocator_api2::alloc::AllocError)?; + let sl = unsafe { std::slice::from_raw_parts(inner, 2) }; + let res = NonNull::from(sl); + + self.allocations.insert(ptr, allocation); + Ok(res) + } + + unsafe fn deallocate(&self, ptr: std::ptr::NonNull, _layout: std::alloc::Layout) { + let _ = self.allocations.remove(&ptr); + } +}*/ + +/// Builder for allocations +#[zenoh_macros::unstable_doc] +pub struct AllocBuilder< + 'a, + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy = JustAlloc, +> { + layout: &'a AllocLayout<'a, IDSource, Backend>, + _phantom: PhantomData, +} + +// Generic impl +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder<'a, IDSource, Backend, OtherPolicy> { + AllocBuilder { + layout: self.layout, + _phantom: PhantomData, + } + } +} + +// Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy: AllocPolicy, +{ + /// Get the result + #[zenoh_macros::unstable_doc] + pub fn res(self) -> BufAllocResult { + self.layout + .provider + .alloc_inner::(self.layout.size, &self.layout.provider_layout) + } +} + +// Async Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + 
IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + Policy: AsyncAllocPolicy, +{ + /// Get the async result + #[zenoh_macros::unstable_doc] + pub async fn res_async(self) -> BufAllocResult { + self.layout + .provider + .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) + .await + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryProviderBuilder; +impl SharedMemoryProviderBuilder { + /// Get the builder to construct SharedMemoryProvider + #[zenoh_macros::unstable_doc] + pub fn builder() -> Self { + Self + } + + /// Set compile-time-evaluated protocol ID (preferred) + #[zenoh_macros::unstable_doc] + pub fn protocol_id( + self, + ) -> SharedMemoryProviderBuilderID> { + SharedMemoryProviderBuilderID::> { + id: StaticProtocolID, + } + } + + /// Set runtime-evaluated protocol ID + #[zenoh_macros::unstable_doc] + pub fn dynamic_protocol_id( + self, + id: ProtocolID, + ) -> SharedMemoryProviderBuilderID { + SharedMemoryProviderBuilderID:: { + id: DynamicProtocolID::new(id), + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryProviderBuilderID { + id: IDSource, +} +impl SharedMemoryProviderBuilderID { + /// Set the backend + #[zenoh_macros::unstable_doc] + pub fn backend( + self, + backend: Backend, + ) -> SharedMemoryProviderBuilderBackendID { + SharedMemoryProviderBuilderBackendID { + backend, + id: self.id, + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryProviderBuilderBackendID +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + backend: Backend, + id: IDSource, +} +impl SharedMemoryProviderBuilderBackendID +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// build SharedMemoryProvider + #[zenoh_macros::unstable_doc] + pub fn res(self) -> SharedMemoryProvider { + SharedMemoryProvider::new(self.backend, self.id) + } +} + +/// Trait to create ProtocolID sources for SharedMemoryProvider 
+#[zenoh_macros::unstable_doc] +pub trait ProtocolIDSource { + fn id(&self) -> ProtocolID; +} + +/// Static ProtocolID source. This is a recommended API to set ProtocolID +/// when creating SharedMemoryProvider as the ID value is statically evaluated +/// at compile-time and can be optimized. +#[zenoh_macros::unstable_doc] +#[derive(Default)] +pub struct StaticProtocolID; +impl ProtocolIDSource for StaticProtocolID { + fn id(&self) -> ProtocolID { + ID + } +} + +/// Dynamic ProtocolID source. This is an alternative API to set ProtocolID +/// when creating SharedMemoryProvider for cases where ProtocolID is unknown +/// at compile-time. +#[zenoh_macros::unstable_doc] +pub struct DynamicProtocolID { + id: ProtocolID, +} +impl DynamicProtocolID { + #[zenoh_macros::unstable_doc] + pub fn new(id: ProtocolID) -> Self { + Self { id } + } +} +impl ProtocolIDSource for DynamicProtocolID { + fn id(&self) -> ProtocolID { + self.id + } +} +unsafe impl Send for DynamicProtocolID {} +unsafe impl Sync for DynamicProtocolID {} + +/// A generalized interface for shared memory data sources +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct SharedMemoryProvider +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + backend: Backend, + busy_list: Mutex>, + id: IDSource, +} + +impl SharedMemoryProvider +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Create layout builder associated with particular SharedMemoryProvider. + /// Layout is a rich interface to make allocations + #[zenoh_macros::unstable_doc] + pub fn alloc_layout(&self) -> AllocLayoutBuilder { + AllocLayoutBuilder { provider: self } + } + + /// Defragment memory + #[zenoh_macros::unstable_doc] + pub fn defragment(&self) -> usize { + self.backend.defragment() + } + + /// Map externally-allocated chunk into ZSliceShmMut. + /// This method is designed to be used with push data sources. + /// Remember that chunk's len may be >= len! 
+ #[zenoh_macros::unstable_doc] + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // wrap everything to SharedMemoryBuf + let wrapped = self.wrap( + chunk, + len, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + } + + /// Try to collect free chunks. + /// Returns the size of largest collected chunk + #[zenoh_macros::unstable_doc] + pub fn garbage_collect(&self) -> usize { + fn is_free_chunk(chunk: &BusyChunk) -> bool { + let header = chunk.header.descriptor.header(); + if header.refcount.load(Ordering::SeqCst) != 0 { + return header.watchdog_invalidated.load(Ordering::SeqCst); + } + true + } + + tracing::trace!("Running Garbage Collector"); + + let mut largest = 0usize; + let mut guard = self.busy_list.lock().unwrap(); + guard.retain(|maybe_free| { + if is_free_chunk(maybe_free) { + tracing::trace!("Garbage Collecting Chunk: {:?}", maybe_free); + self.backend.free(&maybe_free.descriptor); + largest = largest.max(maybe_free.descriptor.len); + return false; + } + true + }); + drop(guard); + + largest + } + + /// Bytes available for use + #[zenoh_macros::unstable_doc] + pub fn available(&self) -> usize { + self.backend.available() + } +} + +// PRIVATE impls +impl SharedMemoryProvider +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + fn new(backend: Backend, id: IDSource) -> Self { + Self { + backend, + busy_list: Mutex::new(VecDeque::default()), + id, + } + } + + fn alloc_inner(&self, size: usize, layout: &MemoryLayout) -> BufAllocResult + where + Policy: AllocPolicy, + { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // allocate data chunk + // Perform actions depending on the Policy + // NOTE: it is necessary to properly 
map this chunk OR free it if mapping fails! + // Don't loose this chunk as it leads to memory leak at the backend side! + // NOTE: self.backend.alloc(len) returns chunk with len >= required len, + // and it is necessary to handle that properly and pass this len to corresponding free(...) + let chunk = Policy::alloc(layout, self)?; + + // wrap allocated chunk to SharedMemoryBuf + let wrapped = self.wrap( + chunk, + size, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + } + + fn alloc_resources() -> ZResult<( + AllocatedHeaderDescriptor, + AllocatedWatchdog, + ConfirmedDescriptor, + )> { + // allocate shared header + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + + // allocate watchdog + let allocated_watchdog = GLOBAL_STORAGE.allocate_watchdog()?; + + // add watchdog to confirmator + let confirmed_watchdog = GLOBAL_CONFIRMATOR.add_owned(&allocated_watchdog.descriptor)?; + + Ok((allocated_header, allocated_watchdog, confirmed_watchdog)) + } + + fn wrap( + &self, + chunk: AllocatedChunk, + len: usize, + allocated_header: AllocatedHeaderDescriptor, + allocated_watchdog: AllocatedWatchdog, + confirmed_watchdog: ConfirmedDescriptor, + ) -> SharedMemoryBuf { + let header = allocated_header.descriptor.clone(); + let descriptor = Descriptor::from(&allocated_watchdog.descriptor); + + // add watchdog to validator + let c_header = header.clone(); + GLOBAL_VALIDATOR.add( + allocated_watchdog.descriptor.clone(), + Box::new(move || { + c_header + .header() + .watchdog_invalidated + .store(true, Ordering::SeqCst); + }), + ); + + // Create buffer's info + let info = SharedMemoryBufInfo::new( + chunk.descriptor.clone(), + self.id.id(), + len, + descriptor, + HeaderDescriptor::from(&header), + header.header().generation.load(Ordering::SeqCst), + ); + + // Create buffer + let shmb = SharedMemoryBuf { + header, + buf: chunk.data, + info, + watchdog: Arc::new(confirmed_watchdog), + }; + + // 
Create and store busy chunk + self.busy_list.lock().unwrap().push_back(BusyChunk::new( + chunk.descriptor, + allocated_header, + allocated_watchdog, + )); + + shmb + } +} + +// PRIVATE impls for Sync backend +impl SharedMemoryProvider +where + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, +{ + async fn alloc_inner_async( + &self, + size: usize, + backend_layout: &MemoryLayout, + ) -> BufAllocResult + where + Policy: AsyncAllocPolicy, + { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // allocate data chunk + // Perform actions depending on the Policy + // NOTE: it is necessary to properly map this chunk OR free it if mapping fails! + // Don't loose this chunk as it leads to memory leak at the backend side! + // NOTE: self.backend.alloc(len) returns chunk with len >= required len, + // and it is necessary to handle that properly and pass this len to corresponding free(...) + let chunk = Policy::alloc_async(backend_layout, self).await?; + + // wrap allocated chunk to SharedMemoryBuf + let wrapped = self.wrap( + chunk, + size, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + } +} diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs new file mode 100644 index 0000000000..cd15ce3720 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs @@ -0,0 +1,52 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_result::ZResult; + +use super::{ + chunk::ChunkDescriptor, + types::{ChunkAllocResult, MemoryLayout}, +}; + +/// The provider backend trait +/// Implement this interface to create a Zenoh-compatible shared memory provider +#[zenoh_macros::unstable_doc] +pub trait SharedMemoryProviderBackend { + /// Allocate the chunk of desired size. + /// If successful, the result's chunk size will be >= len + #[zenoh_macros::unstable_doc] + fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult; + + /// Deallocate the chunk. + /// It is guaranteed that chunk's descriptor will correspond to the one returned from alloc(...) + #[zenoh_macros::unstable_doc] + fn free(&self, chunk: &ChunkDescriptor); + + /// Defragment the memory. + /// Should return the size of largest defragmented chunk + #[zenoh_macros::unstable_doc] + fn defragment(&self) -> usize; + + /// Bytes available for use + #[zenoh_macros::unstable_doc] + fn available(&self) -> usize; + + /// Check and calculate suitable layout for layout. + /// Depending on the implementation, backend may relayout allocations for bigger layouts.
+ /// This method is used to: + /// - validate, if the provided layout can be used with this backend + /// - adopt the layout for backend capabilities + #[zenoh_macros::unstable_doc] + fn layout_for(&self, layout: MemoryLayout) -> ZResult; +} diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs new file mode 100644 index 0000000000..662482f567 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -0,0 +1,173 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::fmt::Display; + +use zenoh_result::{bail, ZResult}; + +use crate::api::slice::zsliceshmmut::ZSliceShmMut; + +use super::chunk::AllocatedChunk; + +/// Allocation errors +/// +/// NeedDefragment: defragmentation needed +/// OutOfMemory: the provider is out of memory +/// Other: other error +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZAllocError { + NeedDefragment, + OutOfMemory, + Other(zenoh_result::Error), +} + +impl From for ZAllocError { + fn from(value: zenoh_result::Error) -> Self { + Self::Other(value) + } +} + +/// alignemnt in powers of 2: 0 == 1-byte alignment, 1 == 2byte, 2 == 4byte, 3 == 8byte etc +#[zenoh_macros::unstable_doc] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct AllocAlignment { + pow: u8, +} + +impl Display for AllocAlignment { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!("[{}]", self.get_alignment_value())) + } +} + +impl Default for AllocAlignment { + fn default() -> Self { + Self { + pow: (std::mem::align_of::() as 
f64).log2().round() as u8, + } + } +} + +impl AllocAlignment { + #[zenoh_macros::unstable_doc] + pub fn new(pow: u8) -> Self { + Self { pow } + } + + /// Get alignment in normal units (bytes) + #[zenoh_macros::unstable_doc] + pub fn get_alignment_value(&self) -> usize { + 1usize << self.pow + } + + /// Align size according to inner alignment. + /// This call may extend the size (see the example) + /// # Examples + /// + /// ``` + /// use zenoh_shm::api::provider::types::AllocAlignment; + /// + /// let alignment = AllocAlignment::new(2); // 4-byte alignment + /// let initial_size: usize = 7; + /// let aligned_size = alignment.align_size(initial_size); + /// assert_eq!(aligned_size, 8); + /// ``` + #[zenoh_macros::unstable_doc] + pub fn align_size(&self, size: usize) -> usize { + let alignment = self.get_alignment_value(); + match size % alignment { + 0 => size, + remainder => size + (alignment - remainder), + } + } +} + +/// Memory layout representation: alignment and size aligned for this alignment +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct MemoryLayout { + size: usize, + alignment: AllocAlignment, +} + +impl Display for MemoryLayout { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!( + "[size={},alignment={}]", + self.size, self.alignment + )) + } +} + +impl MemoryLayout { + /// Try to create a new memory layout + #[zenoh_macros::unstable_doc] + pub fn new(size: usize, alignment: AllocAlignment) -> ZResult { + // size of an allocation must be a multiple of its alignment! + match size % alignment.get_alignment_value() { + 0 => Ok(Self { size, alignment }), + _ => bail!("size of an allocation must be a multiple of its alignment!"), + } + } + + #[zenoh_macros::unstable_doc] + pub fn size(&self) -> usize { + self.size + } + + #[zenoh_macros::unstable_doc] + pub fn alignment(&self) -> AllocAlignment { + self.alignment + } + + /// Realign the layout for new alignment.
The alignment must be >= of the existing one. + /// # Examples + /// + /// ``` + /// use zenoh_shm::api::provider::types::AllocAlignment; + /// use zenoh_shm::api::provider::types::MemoryLayout; + /// + /// // 8 bytes with 4-byte alignment + /// let layout4b = MemoryLayout::new(8, AllocAlignment::new(2)).unwrap(); + /// + /// // Try to realign with 2-byte alignment + /// let layout2b = layout4b.extend(AllocAlignment::new(1)); + /// assert!(layout2b.is_err()); // fails because new alignment must be >= old + /// + /// // Try to realign with 8-byte alignment + /// let layout8b = layout4b.extend(AllocAlignment::new(3)); + /// assert!(layout8b.is_ok()); // ok + /// ``` + #[zenoh_macros::unstable_doc] + pub fn extend(&self, new_alignment: AllocAlignment) -> ZResult { + if self.alignment <= new_alignment { + let new_size = new_alignment.align_size(self.size); + return MemoryLayout::new(new_size, new_alignment); + } + bail!( + "Cannot extend alignment from {} to {}: new alignment must be >= old!", + self.alignment, + new_alignment + ) + } +} + +/// SHM chunk allocation result +#[zenoh_macros::unstable_doc] +pub type ChunkAllocResult = Result; + +/// SHM buffer allocation result +#[zenoh_macros::unstable_doc] +pub type BufAllocResult = Result; diff --git a/commons/zenoh-shm/src/api/slice/mod.rs b/commons/zenoh-shm/src/api/slice/mod.rs new file mode 100644 index 0000000000..59c793f94a --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/mod.rs @@ -0,0 +1,17 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod traits; +pub mod zsliceshm; +pub mod zsliceshmmut; diff --git a/commons/zenoh-shm/src/api/slice/traits.rs b/commons/zenoh-shm/src/api/slice/traits.rs new file mode 100644 index 0000000000..9104abc4a1 --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/traits.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::ops::{Deref, DerefMut}; + +#[zenoh_macros::unstable_doc] +pub trait SHMBuf: Deref + AsRef<[u8]> { + #[zenoh_macros::unstable_doc] + fn is_valid(&self) -> bool; +} + +#[zenoh_macros::unstable_doc] +pub trait SHMBufMut: SHMBuf + DerefMut + AsMut<[u8]> {} diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/slice/zsliceshm.rs new file mode 100644 index 0000000000..86f4395ebb --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/zsliceshm.rs @@ -0,0 +1,172 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use core::ops::Deref; +use std::{ + borrow::{Borrow, BorrowMut}, + ops::DerefMut, +}; + +use zenoh_buffers::{ZBuf, ZSlice}; + +use crate::SharedMemoryBuf; + +use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; + +/// An immutable SHM slice +#[zenoh_macros::unstable_doc] +#[repr(transparent)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ZSliceShm(pub(crate) SharedMemoryBuf); + +impl SHMBuf for ZSliceShm { + fn is_valid(&self) -> bool { + self.0.is_valid() + } +} + +impl PartialEq<&zsliceshm> for ZSliceShm { + fn eq(&self, other: &&zsliceshm) -> bool { + self.0 == other.0 .0 + } +} + +impl Borrow for ZSliceShm { + fn borrow(&self) -> &zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZSliceShm { + fn borrow_mut(&mut self) -> &mut zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Deref for ZSliceShm { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } +} + +impl AsRef<[u8]> for ZSliceShm { + fn as_ref(&self) -> &[u8] { + self + } +} + +impl From for ZSliceShm { + fn from(value: SharedMemoryBuf) -> Self { + Self(value) + } +} + +impl From for ZSlice { + fn from(value: ZSliceShm) -> Self { + value.0.into() + } +} + +impl From for ZBuf { + fn from(value: ZSliceShm) -> Self { + value.0.into() + } +} + +impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { + type Error = (); + + fn try_from(value: &mut ZSliceShm) -> Result { + match value.0.is_unique() && value.0.is_valid() { + true => { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and 
zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + Ok(unsafe { core::mem::transmute(value) }) + } + false => Err(()), + } + } +} + +/// A borrowed immutable SHM slice +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[allow(non_camel_case_types)] +#[repr(transparent)] +pub struct zsliceshm(ZSliceShm); + +impl ToOwned for zsliceshm { + type Owned = ZSliceShm; + + fn to_owned(&self) -> Self::Owned { + self.0.clone() + } +} + +impl PartialEq for &zsliceshm { + fn eq(&self, other: &ZSliceShm) -> bool { + self.0 .0 == other.0 + } +} + +impl Deref for zsliceshm { + type Target = ZSliceShm; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for zsliceshm { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From<&SharedMemoryBuf> for &zsliceshm { + fn from(value: &SharedMemoryBuf) -> Self { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(value) } + } +} + +impl From<&mut SharedMemoryBuf> for &mut zsliceshm { + fn from(value: &mut SharedMemoryBuf) -> Self { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(value) } + } +} + +impl TryFrom<&mut zsliceshm> for &mut zsliceshmmut { + type Error = (); + + fn try_from(value: &mut zsliceshm) -> Result { + match value.0 .0.is_unique() && value.0 .0.is_valid() { + true => { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + Ok(unsafe { core::mem::transmute(value) }) + } + false => Err(()), + } + } +} diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs 
b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs new file mode 100644 index 0000000000..62823785da --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs @@ -0,0 +1,189 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use core::ops::{Deref, DerefMut}; +use std::borrow::{Borrow, BorrowMut}; + +use zenoh_buffers::{ZBuf, ZSlice}; + +use crate::SharedMemoryBuf; + +use super::{ + traits::{SHMBuf, SHMBufMut}, + zsliceshm::{zsliceshm, ZSliceShm}, +}; + +/// A mutable SHM slice +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[repr(transparent)] +pub struct ZSliceShmMut(SharedMemoryBuf); + +impl SHMBuf for ZSliceShmMut { + fn is_valid(&self) -> bool { + self.0.is_valid() + } +} + +impl SHMBufMut for ZSliceShmMut {} + +impl ZSliceShmMut { + pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { + Self(data) + } +} + +impl PartialEq for &ZSliceShmMut { + fn eq(&self, other: &zsliceshmmut) -> bool { + self.0 == other.0 .0 + } +} + +impl TryFrom for ZSliceShmMut { + type Error = SharedMemoryBuf; + + fn try_from(value: SharedMemoryBuf) -> Result { + match value.is_unique() && value.is_valid() { + true => Ok(Self(value)), + false => Err(value), + } + } +} + +impl TryFrom for ZSliceShmMut { + type Error = ZSliceShm; + + fn try_from(value: ZSliceShm) -> Result { + match value.0.is_unique() && value.0.is_valid() { + true => Ok(Self(value.0)), + false => Err(value), + } + } +} + +impl Borrow for ZSliceShmMut { + fn borrow(&self) -> &zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // 
to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZSliceShmMut { + fn borrow_mut(&mut self) -> &mut zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Borrow for ZSliceShmMut { + fn borrow(&self) -> &zsliceshmmut { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZSliceShmMut { + fn borrow_mut(&mut self) -> &mut zsliceshmmut { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Deref for ZSliceShmMut { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } +} + +impl DerefMut for ZSliceShmMut { + fn deref_mut(&mut self) -> &mut Self::Target { + self.0.as_mut() + } +} + +impl AsRef<[u8]> for ZSliceShmMut { + fn as_ref(&self) -> &[u8] { + self + } +} + +impl AsMut<[u8]> for ZSliceShmMut { + fn as_mut(&mut self) -> &mut [u8] { + self + } +} + +impl From for ZSliceShm { + fn from(value: ZSliceShmMut) -> Self { + value.0.into() + } +} + +impl From for ZSlice { + fn from(value: ZSliceShmMut) -> Self { + value.0.into() + } +} + +impl From for ZBuf { + fn from(value: ZSliceShmMut) -> Self { + value.0.into() + } +} + +/// A borrowed mutable SHM slice +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[allow(non_camel_case_types)] +#[repr(transparent)] +pub struct zsliceshmmut(ZSliceShmMut); + +impl PartialEq for &zsliceshmmut { + fn eq(&self, other: &ZSliceShmMut) -> bool { + self.0 .0 == other.0 + } +} + 
+impl Deref for zsliceshmmut { + type Target = ZSliceShmMut; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for zsliceshmmut { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl TryFrom<&mut SharedMemoryBuf> for &mut zsliceshmmut { + type Error = (); + + fn try_from(value: &mut SharedMemoryBuf) -> Result { + match value.is_unique() && value.is_valid() { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + true => Ok(unsafe { core::mem::transmute(value) }), + false => Err(()), + } + } +} diff --git a/commons/zenoh-shm/src/header/allocated_descriptor.rs b/commons/zenoh-shm/src/header/allocated_descriptor.rs new file mode 100644 index 0000000000..f800683595 --- /dev/null +++ b/commons/zenoh-shm/src/header/allocated_descriptor.rs @@ -0,0 +1,26 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use super::{descriptor::OwnedHeaderDescriptor, storage::GLOBAL_HEADER_STORAGE}; + +#[derive(Debug)] +pub struct AllocatedHeaderDescriptor { + pub descriptor: OwnedHeaderDescriptor, +} + +impl Drop for AllocatedHeaderDescriptor { + fn drop(&mut self) { + GLOBAL_HEADER_STORAGE.reclaim_header(self.descriptor.clone()); + } +} diff --git a/commons/zenoh-shm/src/header/chunk_header.rs b/commons/zenoh-shm/src/header/chunk_header.rs new file mode 100644 index 0000000000..c5eb11bb7c --- --- /dev/null +++ b/commons/zenoh-shm/src/header/chunk_header.rs @@ -0,0 +1,28 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::{AtomicBool, AtomicU32}; + +// Chunk header +#[stabby::stabby] +#[derive(Debug)] +pub struct ChunkHeaderType { + /* + TODO: We don't really need 32 bits here, but access to 16-bit fields with 1 byte alignment is less performant on most of the platforms.
+ We need to bench and select reasonable integer sizes here once we have an implementation to bench + */ + pub refcount: AtomicU32, + pub watchdog_invalidated: AtomicBool, + pub generation: AtomicU32, +} diff --git a/commons/zenoh-shm/src/header/descriptor.rs b/commons/zenoh-shm/src/header/descriptor.rs new file mode 100644 index 0000000000..7700eb90c6 --- /dev/null +++ b/commons/zenoh-shm/src/header/descriptor.rs @@ -0,0 +1,63 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use super::{chunk_header::ChunkHeaderType, segment::HeaderSegment}; + +pub type HeaderSegmentID = u16; +pub type HeaderIndex = u16; + +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Debug)] +pub struct HeaderDescriptor { + pub id: HeaderSegmentID, + pub index: HeaderIndex, +} + +impl From<&OwnedHeaderDescriptor> for HeaderDescriptor { + fn from(item: &OwnedHeaderDescriptor) -> Self { + let id = item.segment.array.id(); + let index = unsafe { item.segment.array.index(item.header) }; + + Self { id, index } + } +} + +#[derive(Clone)] +pub struct OwnedHeaderDescriptor { + segment: Arc, + header: *const ChunkHeaderType, +} + +unsafe impl Send for OwnedHeaderDescriptor {} +unsafe impl Sync for OwnedHeaderDescriptor {} + +impl OwnedHeaderDescriptor { + pub(crate) fn new(segment: Arc, header: *const ChunkHeaderType) -> Self { + Self { segment, header } + } + + #[inline(always)] + pub fn header(&self) -> &ChunkHeaderType { + unsafe { &(*self.header) } + } +} + +impl std::fmt::Debug for OwnedHeaderDescriptor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { + f.debug_struct("OwnedHeaderDescriptor") + .field("header", &self.header) + .finish() + } +} diff --git a/commons/zenoh-shm/src/header/mod.rs b/commons/zenoh-shm/src/header/mod.rs new file mode 100644 index 0000000000..84acc86e87 --- /dev/null +++ b/commons/zenoh-shm/src/header/mod.rs @@ -0,0 +1,23 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod descriptor; + +tested_crate_module!(storage); +tested_crate_module!(subscription); + +pub(crate) mod allocated_descriptor; +pub(crate) mod chunk_header; + +mod segment; diff --git a/commons/zenoh-shm/src/header/segment.rs b/commons/zenoh-shm/src/header/segment.rs new file mode 100644 index 0000000000..e36e54a233 --- /dev/null +++ b/commons/zenoh-shm/src/header/segment.rs @@ -0,0 +1,40 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_result::ZResult; + +use crate::posix_shm::array::ArrayInSHM; + +use super::{ + chunk_header::ChunkHeaderType, + descriptor::{HeaderIndex, HeaderSegmentID}, +}; + +const HEADER_SEGMENT_PREFIX: &str = "header"; + +pub struct HeaderSegment { + pub array: ArrayInSHM, +} + +impl HeaderSegment { + pub fn create(header_count: usize) -> ZResult { + let array = ArrayInSHM::create(header_count, HEADER_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn open(id: HeaderSegmentID) -> ZResult { + let array = ArrayInSHM::open(id, HEADER_SEGMENT_PREFIX)?; + Ok(Self { array }) + } +} diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs new file mode 100644 index 0000000000..c09fa83dba --- /dev/null +++ b/commons/zenoh-shm/src/header/storage.rs @@ -0,0 +1,87 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::LinkedList, + sync::{Arc, Mutex}, +}; + +use zenoh_result::{zerror, ZResult}; + +use super::{ + allocated_descriptor::AllocatedHeaderDescriptor, + descriptor::{HeaderIndex, OwnedHeaderDescriptor}, + segment::HeaderSegment, +}; + +lazy_static! 
{ + pub static ref GLOBAL_HEADER_STORAGE: HeaderStorage = HeaderStorage::new(32768usize).unwrap(); +} + +pub struct HeaderStorage { + available: Arc>>, +} + +impl HeaderStorage { + fn new(initial_header_count: usize) -> ZResult { + let initial_segment = Arc::new(HeaderSegment::create(initial_header_count)?); + let mut initially_available = LinkedList::::default(); + + for index in 0..initial_header_count { + let header = unsafe { initial_segment.array.elem(index as HeaderIndex) }; + let descriptor = OwnedHeaderDescriptor::new(initial_segment.clone(), header); + + // init generation (this is not really necessary, but we do) + descriptor + .header() + .generation + .store(0, std::sync::atomic::Ordering::SeqCst); + + initially_available.push_back(descriptor); + } + + Ok(Self { + available: Arc::new(Mutex::new(initially_available)), + }) + } + + pub fn allocate_header(&self) -> ZResult { + let mut guard = self.available.lock().map_err(|e| zerror!("{e}"))?; + let popped = guard.pop_front(); + drop(guard); + + let descriptor = popped.ok_or_else(|| zerror!("no free headers available"))?; + + //initialize header fields + let header = descriptor.header(); + header + .refcount + .store(1, std::sync::atomic::Ordering::SeqCst); + header + .watchdog_invalidated + .store(false, std::sync::atomic::Ordering::SeqCst); + + Ok(AllocatedHeaderDescriptor { descriptor }) + } + + pub fn reclaim_header(&self, header: OwnedHeaderDescriptor) { + // header deallocated - increment it's generation to invalidate any existing references + header + .header() + .generation + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + let mut guard = self.available.lock().unwrap(); + guard.push_front(header); + } +} diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs new file mode 100644 index 0000000000..49ad170aea --- /dev/null +++ b/commons/zenoh-shm/src/header/subscription.rs @@ -0,0 +1,61 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// 
This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, +}; + +use zenoh_result::{zerror, ZResult}; + +use super::{ + descriptor::{HeaderDescriptor, HeaderSegmentID, OwnedHeaderDescriptor}, + segment::HeaderSegment, +}; + +lazy_static! { + pub static ref GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); +} + +pub struct Subscription { + linked_table: Mutex>>, +} + +impl Subscription { + fn new() -> Self { + Self { + linked_table: Mutex::default(), + } + } + + pub fn link(&self, descriptor: &HeaderDescriptor) -> ZResult { + let mut guard = self.linked_table.lock().map_err(|e| zerror!("{e}"))?; + // ensure segment + let segment = match guard.entry(descriptor.id) { + std::collections::btree_map::Entry::Vacant(vacant) => { + let segment = Arc::new(HeaderSegment::open(descriptor.id)?); + vacant.insert(segment.clone()); + segment + } + std::collections::btree_map::Entry::Occupied(occupied) => occupied.get().clone(), + }; + drop(guard); + + // construct owned descriptor + // SAFETY: HeaderDescriptor source guarantees that descriptor.index is valid for segment + let header = unsafe { segment.array.elem(descriptor.index) }; + let owned_descriptor = OwnedHeaderDescriptor::new(segment, header); + Ok(owned_descriptor) + } +} diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 82f3614380..abcdd558fb 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -11,79 +11,82 @@ // Contributors: // ZettaScale Zenoh Team, // -use shared_memory::{Shmem, ShmemConf, ShmemError}; +use 
api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; +use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; use std::{ any::Any, - cmp, - collections::{binary_heap::BinaryHeap, HashMap}, - fmt, mem, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicPtr, Ordering}, + Arc, + }, }; +use watchdog::{confirmator::ConfirmedDescriptor, descriptor::Descriptor}; use zenoh_buffers::ZSliceBuffer; -use zenoh_result::{zerror, ShmError, ZResult}; -const MIN_FREE_CHUNK_SIZE: usize = 1_024; -const ACCOUNTED_OVERHEAD: usize = 4_096; -const ZENOH_SHM_PREFIX: &str = "zenoh_shm_zid"; - -// Chunk header -type ChunkHeaderType = AtomicUsize; -const CHUNK_HEADER_SIZE: usize = std::mem::size_of::(); - -fn align_addr_at(addr: usize, align: usize) -> usize { - match addr % align { - 0 => addr, - r => addr + (align - r), - } +#[macro_export] +macro_rules! tested_module { + ($module:ident) => { + #[cfg(feature = "test")] + pub mod $module; + #[cfg(not(feature = "test"))] + mod $module; + }; } -#[derive(Eq, Copy, Clone, Debug)] -struct Chunk { - base_addr: *mut u8, - offset: usize, - size: usize, -} - -impl Ord for Chunk { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.size.cmp(&other.size) - } +#[macro_export] +macro_rules! tested_crate_module { + ($module:ident) => { + #[cfg(feature = "test")] + pub mod $module; + #[cfg(not(feature = "test"))] + pub(crate) mod $module; + }; } -impl PartialOrd for Chunk { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for Chunk { - fn eq(&self, other: &Self) -> bool { - self.size == other.size - } -} +pub mod api; +pub mod header; +pub mod posix_shm; +pub mod reader; +pub mod watchdog; /// Informations about a [`SharedMemoryBuf`]. /// /// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. 
#[derive(Clone, Debug, PartialEq, Eq)] pub struct SharedMemoryBufInfo { - /// The index of the beginning of the buffer in the shm segment. - pub offset: usize, - /// The length of the buffer. - pub length: usize, - /// The identifier of the shm manager that manages the shm segment this buffer points to. - pub shm_manager: String, - /// The kind of buffer. - pub kind: u8, + /// The data chunk descriptor + pub data_descriptor: ChunkDescriptor, + /// Protocol identifier for particular SharedMemory implementation + pub shm_protocol: ProtocolID, + /// Actual data length + /// NOTE: data_descriptor's len is >= of this len and describes the actual memory length + /// dedicated in shared memory segment for this particular buffer. + pub data_len: usize, + + /// The watchdog descriptor + pub watchdog_descriptor: Descriptor, + /// The header descriptor + pub header_descriptor: HeaderDescriptor, + /// The generation of the buffer + pub generation: u32, } impl SharedMemoryBufInfo { - pub fn new(offset: usize, length: usize, manager: String, kind: u8) -> SharedMemoryBufInfo { + pub fn new( + data_descriptor: ChunkDescriptor, + shm_protocol: ProtocolID, + data_len: usize, + watchdog_descriptor: Descriptor, + header_descriptor: HeaderDescriptor, + generation: u32, + ) -> SharedMemoryBufInfo { SharedMemoryBufInfo { - offset, - length, - shm_manager: manager, - kind, + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, } } } @@ -91,20 +94,27 @@ impl SharedMemoryBufInfo { /// A zenoh buffer in shared memory. 
#[non_exhaustive] pub struct SharedMemoryBuf { - pub rc_ptr: AtomicPtr, - pub buf: AtomicPtr, - pub len: usize, + pub(crate) header: OwnedHeaderDescriptor, + pub(crate) buf: AtomicPtr, pub info: SharedMemoryBufInfo, + pub(crate) watchdog: Arc, +} + +impl PartialEq for SharedMemoryBuf { + fn eq(&self, other: &Self) -> bool { + // currently there is no API to resize an SHM buffer, but it is intended in the future, + // so I add size comparsion here to avoid future bugs :) + self.buf.load(Ordering::Relaxed) == other.buf.load(Ordering::Relaxed) + && self.info.data_len == other.info.data_len + } } +impl Eq for SharedMemoryBuf {} impl std::fmt::Debug for SharedMemoryBuf { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let ptr = self.rc_ptr.load(Ordering::SeqCst); - let rc = unsafe { (*ptr).load(Ordering::SeqCst) }; f.debug_struct("SharedMemoryBuf") - .field("rc", &rc) + .field("header", &self.header) .field("buf", &self.buf) - .field("len", &self.len) .field("info", &self.info) .finish() } @@ -112,44 +122,47 @@ impl std::fmt::Debug for SharedMemoryBuf { impl SharedMemoryBuf { pub fn len(&self) -> usize { - self.len + self.info.data_len } pub fn is_empty(&self) -> bool { self.len() == 0 } - pub fn get_kind(&self) -> u8 { - self.info.kind + fn is_valid(&self) -> bool { + self.header.header().generation.load(Ordering::SeqCst) == self.info.generation } - pub fn set_kind(&mut self, v: u8) { - self.info.kind = v + fn is_unique(&self) -> bool { + self.ref_count() == 1 } - pub fn owner(&self) -> String { - self.info.shm_manager.clone() + pub fn ref_count(&self) -> u32 { + self.header.header().refcount.load(Ordering::SeqCst) } - pub fn ref_count(&self) -> usize { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).load(Ordering::SeqCst) } - } - - pub fn inc_ref_count(&self) { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).fetch_add(1, Ordering::SeqCst) }; + /// Increments buffer's reference count + /// + /// # Safety + /// You 
should understand what you are doing, as overestimation + /// of the reference counter can lead to memory being stalled until + /// recovered by watchdog subsystem or forcely deallocated + pub unsafe fn inc_ref_count(&self) { + self.header.header().refcount.fetch_add(1, Ordering::SeqCst); } - pub fn dec_ref_count(&self) { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).fetch_sub(1, Ordering::SeqCst) }; + // PRIVATE: + fn as_slice(&self) -> &[u8] { + tracing::trace!( + "SharedMemoryBuf::as_slice() == len = {:?}", + self.info.data_len + ); + let bp = self.buf.load(Ordering::SeqCst); + unsafe { std::slice::from_raw_parts(bp, self.info.data_len) } } - pub fn as_slice(&self) -> &[u8] { - tracing::trace!("SharedMemoryBuf::as_slice() == len = {:?}", self.len); - let bp = self.buf.load(Ordering::SeqCst); - unsafe { std::slice::from_raw_parts(bp, self.len) } + unsafe fn dec_ref_count(&self) { + self.header.header().refcount.fetch_sub(1, Ordering::SeqCst); } /// Gets a mutable slice. @@ -163,342 +176,35 @@ impl SharedMemoryBuf { /// /// In short, whilst this operation is marked as unsafe, you are safe if you can /// guarantee that your in applications only one process at the time will actually write. 
- pub unsafe fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe fn as_mut_slice_inner(&mut self) -> &mut [u8] { let bp = self.buf.load(Ordering::SeqCst); - std::slice::from_raw_parts_mut(bp, self.len) + std::slice::from_raw_parts_mut(bp, self.info.data_len) } } impl Drop for SharedMemoryBuf { fn drop(&mut self) { - self.dec_ref_count(); + // # Safety + // obviouly, we need to decrement refcount when dropping SharedMemoryBuf instance + unsafe { self.dec_ref_count() }; } } impl Clone for SharedMemoryBuf { fn clone(&self) -> Self { - self.inc_ref_count(); - let rc = self.rc_ptr.load(Ordering::SeqCst); + // # Safety + // obviouly, we need to increment refcount when cloning SharedMemoryBuf instance + unsafe { self.inc_ref_count() }; let bp = self.buf.load(Ordering::SeqCst); SharedMemoryBuf { - rc_ptr: AtomicPtr::new(rc), + header: self.header.clone(), buf: AtomicPtr::new(bp), - len: self.len, info: self.info.clone(), + watchdog: self.watchdog.clone(), } } } -/*************************************/ -/* SHARED MEMORY READER */ -/*************************************/ -pub struct SharedMemoryReader { - segments: HashMap, -} - -unsafe impl Send for SharedMemoryReader {} -unsafe impl Sync for SharedMemoryReader {} - -impl SharedMemoryReader { - pub fn new() -> Self { - Self { - segments: HashMap::new(), - } - } - - pub fn connect_map_to_shm(&mut self, info: &SharedMemoryBufInfo) -> ZResult<()> { - match ShmemConf::new().flink(&info.shm_manager).open() { - Ok(shm) => { - self.segments.insert(info.shm_manager.clone(), shm); - Ok(()) - } - Err(e) => { - let e = zerror!( - "Unable to bind shared memory segment {}: {:?}", - info.shm_manager, - e - ); - tracing::trace!("{}", e); - Err(ShmError(e).into()) - } - } - } - - pub fn try_read_shmbuf(&self, info: &SharedMemoryBufInfo) -> ZResult { - // Try read does not increment the reference count as it is assumed - // that the sender of this buffer has incremented for us. 
- match self.segments.get(&info.shm_manager) { - Some(shm) => { - let base_ptr = shm.as_ptr(); - let rc = unsafe { base_ptr.add(info.offset) as *mut ChunkHeaderType }; - let rc_ptr = AtomicPtr::::new(rc); - let buf = unsafe { base_ptr.add(info.offset + CHUNK_HEADER_SIZE) }; - let shmb = SharedMemoryBuf { - rc_ptr, - buf: AtomicPtr::new(buf), - len: info.length - CHUNK_HEADER_SIZE, - info: info.clone(), - }; - Ok(shmb) - } - None => { - let e = zerror!("Unable to find shared memory segment: {}", info.shm_manager); - tracing::trace!("{}", e); - Err(ShmError(e).into()) - } - } - } - - pub fn read_shmbuf(&mut self, info: &SharedMemoryBufInfo) -> ZResult { - // Read does not increment the reference count as it is assumed - // that the sender of this buffer has incremented for us. - self.try_read_shmbuf(info).or_else(|_| { - self.connect_map_to_shm(info)?; - self.try_read_shmbuf(info) - }) - } -} - -impl Default for SharedMemoryReader { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for SharedMemoryReader { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryReader").finish()?; - f.debug_list().entries(self.segments.keys()).finish() - } -} - -/// A shared memory segment manager. -/// -/// Allows to access a shared memory segment and reserve some parts of this segment for writting. -pub struct SharedMemoryManager { - segment_path: String, - size: usize, - available: usize, - own_segment: Shmem, - free_list: BinaryHeap, - busy_list: Vec, - alignment: usize, -} - -unsafe impl Send for SharedMemoryManager {} - -impl SharedMemoryManager { - /// Creates a new SharedMemoryManager managing allocations of a region of the - /// given size. 
- pub fn make(id: String, size: usize) -> ZResult { - let mut temp_dir = std::env::temp_dir(); - let file_name: String = format!("{ZENOH_SHM_PREFIX}_{id}"); - temp_dir.push(file_name); - let path: String = temp_dir - .to_str() - .ok_or_else(|| ShmError(zerror!("Unable to parse tmp directory: {:?}", temp_dir)))? - .to_string(); - tracing::trace!("Creating file at: {}", path); - let real_size = size + ACCOUNTED_OVERHEAD; - let shmem = match ShmemConf::new() - .size(real_size) - .flink(path.clone()) - .create() - { - Ok(m) => m, - Err(ShmemError::LinkExists) => { - return Err(ShmError(zerror!( - "Unable to open SharedMemoryManager: SharedMemory already exists" - )) - .into()) - } - Err(e) => { - return Err(ShmError(zerror!("Unable to open SharedMemoryManager: {}", e)).into()) - } - }; - let base_ptr = shmem.as_ptr(); - - let mut free_list = BinaryHeap::new(); - let chunk = Chunk { - base_addr: base_ptr, - offset: 0, - size: real_size, - }; - free_list.push(chunk); - let busy_list = vec![]; - let shm = SharedMemoryManager { - segment_path: path, - size, - available: real_size, - own_segment: shmem, - free_list, - busy_list, - alignment: mem::align_of::(), - }; - tracing::trace!( - "Created SharedMemoryManager for {:?}", - shm.own_segment.as_ptr() - ); - Ok(shm) - } - - fn free_chunk_map_to_shmbuf(&self, chunk: &Chunk) -> SharedMemoryBuf { - let info = SharedMemoryBufInfo { - offset: chunk.offset, - length: chunk.size, - shm_manager: self.segment_path.clone(), - kind: 0, - }; - let rc = chunk.base_addr as *mut ChunkHeaderType; - unsafe { (*rc).store(1, Ordering::SeqCst) }; - let rc_ptr = AtomicPtr::::new(rc); - SharedMemoryBuf { - rc_ptr, - buf: AtomicPtr::::new(unsafe { chunk.base_addr.add(CHUNK_HEADER_SIZE) }), - len: chunk.size - CHUNK_HEADER_SIZE, - info, - } - } - - pub fn alloc(&mut self, len: usize) -> ZResult { - tracing::trace!("SharedMemoryManager::alloc({})", len); - // Always allocate a size that will keep the proper alignment requirements - let required_len 
= align_addr_at(len + CHUNK_HEADER_SIZE, self.alignment); - if self.available < required_len { - self.garbage_collect(); - } - if self.available >= required_len { - // The strategy taken is the same for some Unix System V implementations -- as described in the - // famous Bach's book -- in essence keep an ordered list of free slot and always look for the - // biggest as that will give the biggest left-over. - match self.free_list.pop() { - Some(mut chunk) if chunk.size >= required_len => { - self.available -= required_len; - tracing::trace!("Allocator selected Chunk ({:?})", &chunk); - if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { - let free_chunk = Chunk { - base_addr: unsafe { chunk.base_addr.add(required_len) }, - offset: chunk.offset + required_len, - size: chunk.size - required_len, - }; - tracing::trace!( - "The allocation will leave a Free Chunk: {:?}", - &free_chunk - ); - self.free_list.push(free_chunk); - } - chunk.size = required_len; - let shm_buf = self.free_chunk_map_to_shmbuf(&chunk); - tracing::trace!("The allocated Chunk is ({:?})", &chunk); - tracing::trace!("Allocated Shared Memory Buffer: {:?}", &shm_buf); - self.busy_list.push(chunk); - Ok(shm_buf) - } - Some(c) => { - self.free_list.push(c); - let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - Err(e.into()) - } - None => { - let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - tracing::trace!("{}", e); - Err(e.into()) - } - } - } else { - let e = zerror!( "SharedMemoryManager does not have sufficient free memory to allocate {} bytes, try de-fragmenting!", len); - tracing::warn!("{}", e); - Err(e.into()) - } - } - - fn is_free_chunk(chunk: &Chunk) -> bool { - let rc_ptr = chunk.base_addr as *mut ChunkHeaderType; - let rc = unsafe { (*rc_ptr).load(Ordering::SeqCst) }; - rc == 0 - } - - fn 
try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option { - let end_addr = unsafe { a.base_addr.add(a.size) }; - if end_addr == b.base_addr { - Some(Chunk { - base_addr: a.base_addr, - size: a.size + b.size, - offset: a.offset, - }) - } else { - None - } - } - // Returns the amount of memory that it was able to de-fragment - pub fn defragment(&mut self) -> usize { - if self.free_list.len() > 1 { - let mut fbs: Vec = self.free_list.drain().collect(); - fbs.sort_by(|x, y| x.offset.partial_cmp(&y.offset).unwrap()); - let mut current = fbs.remove(0); - let mut defrag_mem = 0; - let mut i = 0; - let n = fbs.len(); - for chunk in fbs.iter() { - i += 1; - let next = *chunk; - match SharedMemoryManager::try_merge_adjacent_chunks(¤t, &next) { - Some(c) => { - current = c; - defrag_mem += current.size; - if i == n { - self.free_list.push(current) - } - } - None => { - self.free_list.push(current); - if i == n { - self.free_list.push(next); - } else { - current = next; - } - } - } - } - defrag_mem - } else { - 0 - } - } - - /// Returns the amount of memory freed - pub fn garbage_collect(&mut self) -> usize { - tracing::trace!("Running Garbage Collector"); - - let mut freed = 0; - let (free, busy) = self - .busy_list - .iter() - .partition(|&c| SharedMemoryManager::is_free_chunk(c)); - self.busy_list = busy; - - for f in free { - freed += f.size; - tracing::trace!("Garbage Collecting Chunk: {:?}", f); - self.free_list.push(f) - } - self.available += freed; - freed - } -} - -impl fmt::Debug for SharedMemoryManager { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryManager") - .field("segment_path", &self.segment_path) - .field("size", &self.size) - .field("available", &self.available) - .field("free_list.len", &self.free_list.len()) - .field("busy_list.len", &self.busy_list.len()) - .finish() - } -} - // Buffer impls // - SharedMemoryBuf impl AsRef<[u8]> for SharedMemoryBuf { @@ -509,7 +215,7 @@ impl AsRef<[u8]> for 
SharedMemoryBuf { impl AsMut<[u8]> for SharedMemoryBuf { fn as_mut(&mut self) -> &mut [u8] { - unsafe { self.as_mut_slice() } + unsafe { self.as_mut_slice_inner() } } } @@ -517,10 +223,12 @@ impl ZSliceBuffer for SharedMemoryBuf { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } diff --git a/commons/zenoh-shm/src/posix_shm/array.rs b/commons/zenoh-shm/src/posix_shm/array.rs new file mode 100644 index 0000000000..d092c579b5 --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/array.rs @@ -0,0 +1,124 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Display, marker::PhantomData, mem::size_of}; + +use num_traits::{AsPrimitive, PrimInt, Unsigned}; +use stabby::IStable; +use zenoh_result::{bail, ZResult}; + +use super::segment::Segment; + +/// An SHM segment that is intended to be an array of elements of some certain type +#[derive(Debug)] +pub struct ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + inner: Segment, + _phantom: PhantomData<(Elem, ElemIndex)>, +} + +unsafe impl Sync for ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ +} +unsafe impl Send for ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ +} + +impl ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, + Elem: IStable, + isize: AsPrimitive, +{ + // Perform compile time check that Elem is not a ZST in such a way `elem_count` can not panic. + const _S: () = if size_of::() == 0 { + panic!("Elem is a ZST. 
ZSTs are not allowed as ArrayInSHM generic"); + }; + + pub fn create(elem_count: usize, file_prefix: &str) -> ZResult { + if elem_count == 0 { + bail!("Unable to create SHM array segment of 0 elements") + } + + let max: usize = ElemIndex::max_value().as_(); + if elem_count - 1 > max { + bail!("Unable to create SHM array segment of {elem_count} elements: out of range for ElemIndex!") + } + + let alloc_size = elem_count * size_of::(); + let inner = Segment::create(alloc_size, file_prefix)?; + Ok(Self { + inner, + _phantom: PhantomData, + }) + } + + pub fn open(id: ID, file_prefix: &str) -> ZResult { + let inner = Segment::open(id, file_prefix)?; + Ok(Self { + inner, + _phantom: PhantomData, + }) + } + + pub fn id(&self) -> ID { + self.inner.id() + } + + pub fn elem_count(&self) -> usize { + self.inner.len() / size_of::() + } + + /// # Safety + /// Retrieves const element by it's index. This is safe if the index doesn't go out of underlying array. + /// Additional assert to check the index validity is added for "test" feature + pub unsafe fn elem(&self, index: ElemIndex) -> *const Elem { + #[cfg(feature = "test")] + assert!(self.inner.len() > index.as_() * size_of::()); + (self.inner.as_ptr() as *const Elem).add(index.as_()) + } + + /// # Safety + /// Retrieves mut element by it's index. This is safe if the index doesn't go out of underlying array. + /// Additional assert to check the index validity is added for "test" feature + pub unsafe fn elem_mut(&self, index: ElemIndex) -> *mut Elem { + #[cfg(feature = "test")] + assert!(self.inner.len() > index.as_() * size_of::()); + (self.inner.as_ptr() as *mut Elem).add(index.as_()) + } + + /// # Safety + /// Calculates element's index. This is safe if the element belongs to underlying array. 
+ /// Additional assert is added for "test" feature + pub unsafe fn index(&self, elem: *const Elem) -> ElemIndex { + let index = elem.offset_from(self.inner.as_ptr() as *const Elem); + #[cfg(feature = "test")] + { + assert!(index >= 0); + assert!(self.inner.len() > index as usize * size_of::()); + } + index.as_() + } +} diff --git a/commons/zenoh-shm/src/posix_shm/mod.rs b/commons/zenoh-shm/src/posix_shm/mod.rs new file mode 100644 index 0000000000..a63b1c9e6d --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/mod.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod array; +tested_crate_module!(segment); diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs new file mode 100644 index 0000000000..d987bad7a9 --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -0,0 +1,127 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + fmt::{Debug, Display}, + mem::size_of, +}; + +use rand::Rng; +use shared_memory::{Shmem, ShmemConf, ShmemError}; +use zenoh_result::{bail, zerror, ZResult}; + +const SEGMENT_DEDICATE_TRIES: usize = 100; +const ECMA: crc::Crc = crc::Crc::::new(&crc::CRC_64_ECMA_182); + +/// Segment of shared memory identified by an ID +pub struct Segment +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + shmem: Shmem, + id: ID, +} + +impl Debug for Segment +where + ID: Debug, + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Segment") + .field("shmem", &self.shmem.as_ptr()) + .field("id", &self.id) + .finish() + } +} + +impl Segment +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + // Automatically generate free id and create a new segment identified by this id + pub fn create(alloc_size: usize, id_prefix: &str) -> ZResult { + for _ in 0..SEGMENT_DEDICATE_TRIES { + // Generate random id + let id: ID = rand::thread_rng().gen(); + + // Try to create a new segment identified by prefix and generated id. 
+ // If creation fails because segment already exists for this id, + // the creation attempt will be repeated with another id + match ShmemConf::new() + .size(alloc_size + size_of::()) + .os_id(Self::os_id(id.clone(), id_prefix)) + .create() + { + Ok(shmem) => { + tracing::debug!( + "Created SHM segment, size: {alloc_size}, prefix: {id_prefix}, id: {id}" + ); + unsafe { *(shmem.as_ptr() as *mut usize) = alloc_size }; + return Ok(Segment { shmem, id }); + } + Err(ShmemError::LinkExists) => {} + Err(ShmemError::MappingIdExists) => {} + Err(e) => bail!("Unable to create POSIX shm segment: {}", e), + } + } + bail!("Unable to dedicate POSIX shm segment file after {SEGMENT_DEDICATE_TRIES} tries!"); + } + + // Open an existing segment identified by id + pub fn open(id: ID, id_prefix: &str) -> ZResult { + let shmem = ShmemConf::new() + .os_id(Self::os_id(id.clone(), id_prefix)) + .open() + .map_err(|e| { + zerror!( + "Error opening POSIX shm segment id {id}, prefix: {id_prefix}: {}", + e + ) + })?; + + if shmem.len() <= size_of::() { + bail!("SHM segment too small") + } + + tracing::debug!("Opened SHM segment, prefix: {id_prefix}, id: {id}"); + + Ok(Self { shmem, id }) + } + + fn os_id(id: ID, id_prefix: &str) -> String { + let os_id_str = format!("{id_prefix}_{id}"); + let crc_os_id_str = ECMA.checksum(os_id_str.as_bytes()); + format!("{:x}", crc_os_id_str) + } + + pub fn as_ptr(&self) -> *mut u8 { + unsafe { self.shmem.as_ptr().add(size_of::()) } + } + + pub fn len(&self) -> usize { + unsafe { *(self.shmem.as_ptr() as *mut usize) } + } + + pub fn is_empty(&self) -> bool { + unsafe { *(self.shmem.as_ptr() as *mut usize) == 0 } + } + + pub fn id(&self) -> ID { + self.id.clone() + } +} diff --git a/commons/zenoh-shm/src/reader.rs b/commons/zenoh-shm/src/reader.rs new file mode 100644 index 0000000000..c2ce2303a9 --- /dev/null +++ b/commons/zenoh-shm/src/reader.rs @@ -0,0 +1,147 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying 
materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{collections::HashMap, ops::Deref, sync::Arc}; + +use zenoh_core::{bail, zerror}; +use zenoh_result::ZResult; + +use crate::{ + api::{ + client::shared_memory_segment::SharedMemorySegment, + client_storage::SharedMemoryClientStorage, + common::types::{ProtocolID, SegmentID}, + }, + header::subscription::GLOBAL_HEADER_SUBSCRIPTION, + watchdog::confirmator::GLOBAL_CONFIRMATOR, + SharedMemoryBuf, SharedMemoryBufInfo, +}; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct SharedMemoryReader { + client_storage: Arc, +} + +impl Deref for SharedMemoryReader { + type Target = SharedMemoryClientStorage; + + fn deref(&self) -> &Self::Target { + &self.client_storage + } +} + +impl SharedMemoryReader { + pub fn new(client_storage: Arc) -> Self { + Self { client_storage } + } + + pub fn read_shmbuf(&self, info: &SharedMemoryBufInfo) -> ZResult { + // Read does not increment the reference count as it is assumed + // that the sender of this buffer has incremented it for us. 
+ + // attach to the watchdog before doing other things + let watchdog = Arc::new(GLOBAL_CONFIRMATOR.add(&info.watchdog_descriptor)?); + + let segment = self.ensure_segment(info)?; + let shmb = SharedMemoryBuf { + header: GLOBAL_HEADER_SUBSCRIPTION.link(&info.header_descriptor)?, + buf: segment.map(info.data_descriptor.chunk)?, + info: info.clone(), + watchdog, + }; + + // Validate buffer + match shmb.is_valid() { + true => Ok(shmb), + false => bail!("Buffer is invalidated"), + } + } + + fn ensure_segment(&self, info: &SharedMemoryBufInfo) -> ZResult> { + let id = GlobalDataSegmentID::new(info.shm_protocol, info.data_descriptor.segment); + + // fastest path: try to get access to already mounted SHM segment + // read lock allows concurrent execution of multiple requests + let r_guard = self.segments.read().unwrap(); + if let Some(val) = r_guard.get(&id) { + return Ok(val.clone()); + } + // fastest path failed: need to mount a new segment + + // drop read lock because we're gonna obtain write lock further + drop(r_guard); + + // find appropriate client + let client = self + .clients + .get_clients() + .get(&id.protocol) + .ok_or_else(|| zerror!("Unsupported SHM protocol: {}", id.protocol))?; + + // obtain write lock... 
+ let mut w_guard = self.segments.write().unwrap(); + + // many concurrent threads may be racing for mounting this particular segment, so we must check again if the segment exists + match w_guard.entry(id) { + // (rare case) segment already mounted + std::collections::hash_map::Entry::Occupied(occupied) => Ok(occupied.get().clone()), + + // (common case) mount a new segment and add it to the map + std::collections::hash_map::Entry::Vacant(vacant) => { + let new_segment = client.attach(info.data_descriptor.segment)?; + Ok(vacant.insert(new_segment).clone()) + } + } + } +} + +#[derive(Debug)] +pub(crate) struct ClientStorage +where + Inner: Sized, +{ + clients: HashMap, +} + +impl ClientStorage { + pub(crate) fn new(clients: HashMap) -> Self { + Self { clients } + } + + pub(crate) fn get_clients(&self) -> &HashMap { + &self.clients + } +} + +/// # Safety +/// Only immutable access to internal container is allowed, +/// so we are Send if the contained type is Send +unsafe impl Send for ClientStorage {} + +/// # Safety +/// Only immutable access to internal container is allowed, +/// so we are Sync if the contained type is Sync +unsafe impl Sync for ClientStorage {} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub(crate) struct GlobalDataSegmentID { + protocol: ProtocolID, + segment: SegmentID, +} + +impl GlobalDataSegmentID { + fn new(protocol: ProtocolID, segment: SegmentID) -> Self { + Self { protocol, segment } + } +} diff --git a/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs new file mode 100644 index 0000000000..45917d5bdc --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs @@ -0,0 +1,35 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use super::{descriptor::OwnedDescriptor, storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR}; + +#[derive(Debug)] +pub struct AllocatedWatchdog { + pub descriptor: OwnedDescriptor, +} + +impl AllocatedWatchdog { + pub(crate) fn new(descriptor: OwnedDescriptor) -> Self { + // reset descriptor on allocation + descriptor.validate(); + Self { descriptor } + } +} + +impl Drop for AllocatedWatchdog { + fn drop(&mut self) { + GLOBAL_VALIDATOR.remove(self.descriptor.clone()); + GLOBAL_STORAGE.free_watchdog(self.descriptor.clone()); + } +} diff --git a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs new file mode 100644 index 0000000000..54c2d233dc --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -0,0 +1,192 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, + time::Duration, +}; + +use lazy_static::lazy_static; +use zenoh_result::{zerror, ZResult}; + +use super::{ + periodic_task::PeriodicTask, + descriptor::{Descriptor, OwnedDescriptor, SegmentID}, + segment::Segment, +}; + +lazy_static! 
{ + pub static ref GLOBAL_CONFIRMATOR: WatchdogConfirmator = + WatchdogConfirmator::new(Duration::from_millis(50)); +} + +pub struct ConfirmedDescriptor { + pub owned: OwnedDescriptor, + confirmed: Arc, +} + +impl Drop for ConfirmedDescriptor { + fn drop(&mut self) { + self.confirmed.remove(self.owned.clone()); + } +} + +impl ConfirmedDescriptor { + fn new(owned: OwnedDescriptor, confirmed: Arc) -> Self { + owned.confirm(); + confirmed.add(owned.clone()); + Self { owned, confirmed } + } +} + +#[derive(PartialEq)] +enum Transaction { + Add, + Remove, +} + +struct ConfirmedSegment { + segment: Arc, + transactions: lockfree::queue::Queue<(Transaction, OwnedDescriptor)>, +} + +impl ConfirmedSegment { + fn new(segment: Arc) -> Self { + Self { + segment, + transactions: lockfree::queue::Queue::default(), + } + } + + fn add(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Add, descriptor)); + } + + fn remove(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Remove, descriptor)); + } + + fn collect_transactions(&self, watchdogs: &mut BTreeMap) { + while let Some((transaction, descriptor)) = self.transactions.pop() { + // collect transactions + match watchdogs.entry(descriptor) { + std::collections::btree_map::Entry::Vacant(vacant) => { + #[cfg(feature = "test")] + assert!(transaction == Transaction::Add); + vacant.insert(1); + } + std::collections::btree_map::Entry::Occupied(mut occupied) => match transaction { + Transaction::Add => { + *occupied.get_mut() += 1; + } + Transaction::Remove => { + if *occupied.get() == 1 { + occupied.remove(); + } else { + *occupied.get_mut() -= 1; + } + } + }, + } + } + } +} +unsafe impl Send for ConfirmedSegment {} +unsafe impl Sync for ConfirmedSegment {} + +// TODO: optimize confirmation by packing descriptors AND linked table together +// TODO: think about linked table cleanup +pub struct WatchdogConfirmator { + confirmed: RwLock>>, + segment_transactions: Arc>>, + _task: 
PeriodicTask, +} + +impl WatchdogConfirmator { + fn new(interval: Duration) -> Self { + let segment_transactions = Arc::>>::default(); + + let c_segment_transactions = segment_transactions.clone(); + let mut segments: Vec<(Arc, BTreeMap)> = vec![]; + let task = PeriodicTask::new("Watchdog Confirmator".to_owned(), interval, move || { + // add new segments + while let Some(new_segment) = c_segment_transactions.as_ref().pop() { + segments.push((new_segment, BTreeMap::default())); + } + + // collect all existing transactions + for (segment, watchdogs) in &mut segments { + segment.collect_transactions(watchdogs); + } + + // confirm all tracked watchdogs + for (_, watchdogs) in &segments { + for watchdog in watchdogs { + watchdog.0.confirm(); + } + } + }); + + Self { + confirmed: RwLock::default(), + segment_transactions, + _task: task, + } + } + + pub fn add_owned(&self, descriptor: &OwnedDescriptor) -> ZResult { + self.add(&Descriptor::from(descriptor)) + } + + pub fn add(&self, descriptor: &Descriptor) -> ZResult { + let guard = self.confirmed.read().map_err(|e| zerror!("{e}"))?; + if let Some(segment) = guard.get(&descriptor.id) { + return self.link(descriptor, segment); + } + drop(guard); + + let segment = Arc::new(Segment::open(descriptor.id)?); + let confirmed_segment = Arc::new(ConfirmedSegment::new(segment)); + let confirmed_descriptoir = self.link(descriptor, &confirmed_segment); + + let mut guard = self.confirmed.write().map_err(|e| zerror!("{e}"))?; + match guard.entry(descriptor.id) { + std::collections::btree_map::Entry::Vacant(vacant) => { + vacant.insert(confirmed_segment.clone()); + self.segment_transactions.push(confirmed_segment); + confirmed_descriptoir + } + std::collections::btree_map::Entry::Occupied(occupied) => { + self.link(descriptor, occupied.get()) + } + } + } + + fn link( + &self, + descriptor: &Descriptor, + segment: &Arc, + ) -> ZResult { + let index = descriptor.index_and_bitpos >> 6; + let bitpos = descriptor.index_and_bitpos & 0x3f; + + 
let atomic = unsafe { segment.segment.array.elem(index) }; + let mask = 1u64 << bitpos; + + let owned = OwnedDescriptor::new(segment.segment.clone(), atomic, mask); + let confirmed = ConfirmedDescriptor::new(owned, segment.clone()); + Ok(confirmed) + } +} diff --git a/commons/zenoh-shm/src/watchdog/descriptor.rs b/commons/zenoh-shm/src/watchdog/descriptor.rs new file mode 100644 index 0000000000..38fddd61e8 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/descriptor.rs @@ -0,0 +1,116 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + hash::Hash, + sync::{atomic::AtomicU64, Arc}, +}; + +use super::segment::Segment; + +pub type SegmentID = u32; + +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Debug)] +pub struct Descriptor { + pub id: SegmentID, + pub index_and_bitpos: u32, +} + +impl From<&OwnedDescriptor> for Descriptor { + fn from(item: &OwnedDescriptor) -> Self { + let bitpos = { + // TODO: can be optimized + let mut v = item.mask; + let mut bitpos = 0u32; + while v > 1 { + bitpos += 1; + v >>= 1; + } + bitpos + }; + let index = unsafe { item.segment.array.index(item.atomic) }; + let index_and_bitpos = (index << 6) | bitpos; + Descriptor { + id: item.segment.array.id(), + index_and_bitpos, + } + } +} + +#[derive(Clone, Debug)] +pub struct OwnedDescriptor { + segment: Arc, + pub atomic: *const AtomicU64, + pub mask: u64, +} + +unsafe impl Send for OwnedDescriptor {} +unsafe impl Sync for OwnedDescriptor {} + +impl Hash for OwnedDescriptor { + fn hash(&self, state: &mut H) { + self.atomic.hash(state); + self.mask.hash(state); + } +} + +impl 
OwnedDescriptor { + pub(crate) fn new(segment: Arc, atomic: *const AtomicU64, mask: u64) -> Self { + Self { + segment, + atomic, + mask, + } + } + + pub fn confirm(&self) { + unsafe { + (*self.atomic).fetch_or(self.mask, std::sync::atomic::Ordering::SeqCst); + }; + } + + pub(crate) fn validate(&self) -> u64 { + unsafe { + (*self.atomic).fetch_and(!self.mask, std::sync::atomic::Ordering::SeqCst) & self.mask + } + } + + #[cfg(feature = "test")] + pub fn test_validate(&self) -> u64 { + self.validate() + } +} + +impl Ord for OwnedDescriptor { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match self.atomic.cmp(&other.atomic) { + core::cmp::Ordering::Equal => {} + ord => return ord, + } + self.mask.cmp(&other.mask) + } +} + +impl PartialOrd for OwnedDescriptor { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for OwnedDescriptor { + fn eq(&self, other: &Self) -> bool { + self.atomic == other.atomic && self.mask == other.mask + } +} +impl Eq for OwnedDescriptor {} diff --git a/commons/zenoh-shm/src/watchdog/mod.rs b/commons/zenoh-shm/src/watchdog/mod.rs new file mode 100644 index 0000000000..55267a5442 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/mod.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod descriptor; + +tested_crate_module!(periodic_task); +tested_crate_module!(storage); +tested_crate_module!(validator); +tested_crate_module!(confirmator); + +pub(crate) mod allocated_watchdog; + +mod segment; diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs new file mode 100644 index 0000000000..98cf8fbba7 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -0,0 +1,100 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +use thread_priority::ThreadBuilder; +#[cfg(unix)] +use thread_priority::{ + set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, ThreadSchedulePolicy::Realtime +}; + +pub struct PeriodicTask { + running: Arc, +} + +impl Drop for PeriodicTask { + fn drop(&mut self) { + self.running.store(false, Ordering::Relaxed) + } +} + +impl PeriodicTask { + pub fn new(name: String, interval: Duration, mut f: F) -> Self + where + F: FnMut() + Send + 'static, + { + let running = Arc::new(AtomicBool::new(true)); + + let c_running = running.clone(); + + #[cfg(unix)] + let builder = ThreadBuilder::default() + .name(name) + .policy(Realtime(RealtimeThreadSchedulePolicy::Fifo)) + .priority(ThreadPriority::Min); + + // TODO: deal with windows realtime scheduling + #[cfg(windows)] + let builder = ThreadBuilder::default().name(name); + + let _ = 
builder.spawn(move |result| { + if let Err(e) = result { + #[cfg(windows)] + tracing::warn!("{:?}: error setting scheduling priority for thread: {:?}, will run with the default one...", std::thread::current().name(), e); + #[cfg(unix)] + { + tracing::warn!("{:?}: error setting realtime FIFO scheduling policy for thread: {:?}, will run with the default one...", std::thread::current().name(), e); + for priotity in (ThreadPriorityValue::MIN..ThreadPriorityValue::MAX).rev() { + if let Ok(p) = priotity.try_into() { + if set_current_thread_priority(ThreadPriority::Crossplatform(p)).is_ok() { + tracing::warn!("{:?}: will use priority {}", std::thread::current().name(), priotity); + break; + } + } + } + } + } + + //TODO: need mlock here! + + while c_running.load(Ordering::Relaxed) { + let cycle_start = std::time::Instant::now(); + + f(); + + // sleep for next iteration + let elapsed = cycle_start.elapsed(); + if elapsed < interval { + let sleep_interval = interval - elapsed; + std::thread::sleep(sleep_interval); + } else { + let err = format!("{:?}: timer overrun", std::thread::current().name()); + #[cfg(not(feature = "test"))] + tracing::error!("{err}"); + #[cfg(feature = "test")] + panic!("{err}"); + } + } + }); + + Self { running } + } +} diff --git a/commons/zenoh-shm/src/watchdog/segment.rs b/commons/zenoh-shm/src/watchdog/segment.rs new file mode 100644 index 0000000000..b4a273c01c --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/segment.rs @@ -0,0 +1,41 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicU64; + +use zenoh_result::ZResult; + +use crate::posix_shm::array::ArrayInSHM; + +use super::descriptor::SegmentID; + +const WATCHDOG_SEGMENT_PREFIX: &str = "watchdog"; + +#[derive(Debug)] +pub struct Segment { + pub array: ArrayInSHM, +} + +impl Segment { + pub fn create(watchdog_count: usize) -> ZResult { + let elem_count = (watchdog_count + 63) / 64; + let array = ArrayInSHM::create(elem_count, WATCHDOG_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn open(id: SegmentID) -> ZResult { + let array = ArrayInSHM::open(id, WATCHDOG_SEGMENT_PREFIX)?; + Ok(Self { array }) + } +} diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs new file mode 100644 index 0000000000..5744a273a0 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -0,0 +1,76 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::BTreeSet, + sync::{Arc, Mutex}, +}; + +use zenoh_result::{zerror, ZResult}; + +use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; + +lazy_static! 
{ + pub static ref GLOBAL_STORAGE: WatchdogStorage = WatchdogStorage::new(32768usize).unwrap(); +} + +pub struct WatchdogStorage { + available: Arc>>, +} + +// TODO: expand and shrink Storage when needed +// OR +// support multiple descriptor assignment (allow multiple buffers to be assigned to the same watchdog) +impl WatchdogStorage { + pub fn new(initial_watchdog_count: usize) -> ZResult { + let segment = Arc::new(Segment::create(initial_watchdog_count)?); + + let mut initially_available = BTreeSet::default(); + let subsegments = segment.array.elem_count(); + for subsegment in 0..subsegments { + let atomic = unsafe { segment.array.elem(subsegment as u32) }; + + for bit in 0..64 { + let mask = 1u64 << bit; + let descriptor = OwnedDescriptor::new(segment.clone(), atomic, mask); + let _new_insert = initially_available.insert(descriptor); + #[cfg(feature = "test")] + assert!(_new_insert); + } + } + + Ok(Self { + available: Arc::new(Mutex::new(initially_available)), + }) + } + + pub fn allocate_watchdog(&self) -> ZResult { + let mut guard = self.available.lock().map_err(|e| zerror!("{e}"))?; + let popped = guard.pop_first(); + drop(guard); + + let allocated = + AllocatedWatchdog::new(popped.ok_or_else(|| zerror!("no free watchdogs available"))?); + + Ok(allocated) + } + + pub(crate) fn free_watchdog(&self, descriptor: OwnedDescriptor) { + if let Ok(mut guard) = self.available.lock() { + let _new_insert = guard.insert(descriptor); + #[cfg(feature = "test")] + assert!(_new_insert); + } + } +} diff --git a/commons/zenoh-shm/src/watchdog/validator.rs b/commons/zenoh-shm/src/watchdog/validator.rs new file mode 100644 index 0000000000..d28dfa8e3c --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/validator.rs @@ -0,0 +1,102 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the 
Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use lazy_static::lazy_static; + +use super::{descriptor::OwnedDescriptor, periodic_task::PeriodicTask}; + +pub(super) type InvalidateCallback = Box; + +lazy_static! { + pub static ref GLOBAL_VALIDATOR: WatchdogValidator = + WatchdogValidator::new(Duration::from_millis(100)); +} + +enum Transaction { + Add(InvalidateCallback), + Remove, +} + +#[derive(Default)] +struct ValidatedStorage { + transactions: lockfree::queue::Queue<(Transaction, OwnedDescriptor)>, +} + +impl ValidatedStorage { + fn add(&self, descriptor: OwnedDescriptor, on_invalidated: InvalidateCallback) { + self.transactions + .push((Transaction::Add(on_invalidated), descriptor)); + } + + fn remove(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Remove, descriptor)); + } + + fn collect_transactions(&self, storage: &mut BTreeMap) { + while let Some((transaction, descriptor)) = self.transactions.pop() { + match transaction { + Transaction::Add(on_invalidated) => { + let _old = storage.insert(descriptor, on_invalidated); + #[cfg(feature = "test")] + assert!(_old.is_none()); + } + Transaction::Remove => { + let _ = storage.remove(&descriptor); + } + } + } + } +} + +// TODO: optimize validation by packing descriptors +pub struct WatchdogValidator { + storage: Arc, + _task: PeriodicTask, +} + +impl WatchdogValidator { + pub fn new(interval: Duration) -> Self { + let storage = Arc::new(ValidatedStorage::default()); + + let c_storage = storage.clone(); + let mut watchdogs = BTreeMap::default(); + let task = PeriodicTask::new("Watchdog Validator".to_owned(), interval, move || { + c_storage.collect_transactions(&mut watchdogs); + + watchdogs.retain(|watchdog, on_invalidated| { + let old_val = watchdog.validate(); + 
if old_val == 0 { + on_invalidated(); + return false; + } + true + }); + }); + + Self { + storage, + _task: task, + } + } + + pub fn add(&self, watchdog: OwnedDescriptor, on_invalidated: InvalidateCallback) { + self.storage.add(watchdog, on_invalidated); + } + + pub fn remove(&self, watchdog: OwnedDescriptor) { + self.storage.remove(watchdog); + } +} diff --git a/commons/zenoh-shm/tests/common/mod.rs b/commons/zenoh-shm/tests/common/mod.rs new file mode 100644 index 0000000000..a97773f686 --- /dev/null +++ b/commons/zenoh-shm/tests/common/mod.rs @@ -0,0 +1,105 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{atomic::AtomicBool, Arc}, + thread::JoinHandle, +}; + +use zenoh_result::ZResult; + +pub const TEST_SEGMENT_PREFIX: &str = "test"; + +pub fn validate_memory(mem1: &mut [u8], mem2: &[u8]) { + assert!(mem1.len() == mem2.len()); + for cycle in 0..255u8 { + // sequentially fill segment1 with values checking segment2 having these changes + for i in 0..mem1.len() { + mem1[i] = cycle; + assert!(mem2[i] == cycle); + } + + // check the whole segment2 having proper values + for i in mem2 { + assert!(*i == cycle); + } + } +} + +pub fn execute_concurrent(concurrent_tasks: usize, iterations: usize, task_fun: TaskFun) +where + TaskFun: Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static, +{ + let mut tasks = vec![]; + for task_index in 0..concurrent_tasks { + let c_task_fun = task_fun.clone(); + let task_handle = std::thread::spawn(move || { + for iteration in 0..iterations { + if let Err(e) = c_task_fun(task_index, iteration) { + 
panic!("task {task_index}: iteration {iteration}: {e}") + } + } + }); + tasks.push(task_handle); + } + for task in tasks { + task.join().expect("Error joining thread!"); + } +} + +pub fn load_fn( + working: Arc, +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + move |_task_index: usize, _iteration: usize| -> ZResult<()> { + while working.load(std::sync::atomic::Ordering::SeqCst) {} + Ok(()) + } +} + +pub struct CpuLoad { + handle: Option>, + flag: Arc, +} + +impl Drop for CpuLoad { + fn drop(&mut self) { + self.flag.store(false, std::sync::atomic::Ordering::SeqCst); + let _ = self.handle.take().unwrap().join(); + } +} + +impl CpuLoad { + pub fn exessive() -> Self { + Self::new(1000) + } + + pub fn optimal_high() -> Self { + Self::new(num_cpus::get()) + } + + pub fn low() -> Self { + Self::new(1) + } + + fn new(thread_count: usize) -> Self { + let flag = Arc::new(AtomicBool::new(true)); + + let c_flag = flag.clone(); + let handle = Some(std::thread::spawn(move || { + execute_concurrent(thread_count, 1, load_fn(c_flag)); + })); + + Self { handle, flag } + } +} diff --git a/commons/zenoh-shm/tests/header.rs b/commons/zenoh-shm/tests/header.rs new file mode 100644 index 0000000000..a734abf108 --- /dev/null +++ b/commons/zenoh-shm/tests/header.rs @@ -0,0 +1,130 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::Ordering::Relaxed; + +use rand::Rng; +use zenoh_result::ZResult; +use zenoh_shm::header::{ + descriptor::HeaderDescriptor, storage::GLOBAL_HEADER_STORAGE, + subscription::GLOBAL_HEADER_SUBSCRIPTION, +}; + +pub mod common; +use common::execute_concurrent; + +fn header_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let _allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + Ok(()) + } +} + +#[test] +fn header_alloc() { + execute_concurrent(1, 1000, header_alloc_fn()); +} + +#[test] +fn header_alloc_concurrent() { + execute_concurrent(100, 1000, header_alloc_fn()); +} + +fn header_link_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| { + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + Ok(()) + } +} + +#[test] +fn header_link() { + execute_concurrent(1, 1000, header_link_fn()); +} + +#[test] +fn header_link_concurrent() { + execute_concurrent(100, 1000, header_link_fn()); +} + +fn header_link_failure_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static +{ + |_task_index: usize, _iteration: usize| { + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + drop(allocated_header); + + // Some comments on this behaviour... + // Even though the allocated_header is dropped, it's SHM segment still exists in GLOBAL_HEADER_STORAGE, + // so there is no way to detect that header is "deallocated" and the code below succeeds. 
The invalidation + // funcionality is implemented on higher level by means of generation mechanism and protects from both header + // and watchdog link-to-deallocated issues. This generation mechanism depends on the behaviour below, so + // everything is fair :) + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + Ok(()) + } +} + +#[test] +fn header_link_failure() { + execute_concurrent(1, 1000, header_link_failure_fn()); +} + +#[test] +fn header_link_failure_concurrent() { + execute_concurrent(100, 1000, header_link_failure_fn()); +} + +fn header_check_memory_fn(parallel_tasks: usize, iterations: usize) { + let task_fun = |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + let linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + + let mut rng = rand::thread_rng(); + let allocated = allocated_header.descriptor.header(); + let linked = linked_header.header(); + for _ in 0..100 { + let gen = rng.gen(); + allocated.generation.store(gen, Relaxed); + assert_eq!(gen, linked.generation.load(Relaxed)); + + let rc = rng.gen(); + allocated.refcount.store(rc, Relaxed); + assert_eq!(rc, linked.refcount.load(Relaxed)); + + let watchdog_inv = rng.gen(); + allocated.watchdog_invalidated.store(watchdog_inv, Relaxed); + assert_eq!(watchdog_inv, linked.watchdog_invalidated.load(Relaxed)); + + assert_eq!(gen, linked.generation.load(Relaxed)); + assert_eq!(rc, linked.refcount.load(Relaxed)); + assert_eq!(watchdog_inv, linked.watchdog_invalidated.load(Relaxed)); + } + Ok(()) + }; + execute_concurrent(parallel_tasks, iterations, task_fun); +} + +#[test] +fn header_check_memory() { + header_check_memory_fn(1, 1000); +} + +#[test] +fn header_check_memory_concurrent() { + header_check_memory_fn(100, 100); +} diff --git a/commons/zenoh-shm/tests/periodic_task.rs b/commons/zenoh-shm/tests/periodic_task.rs new file mode 100644 index 
0000000000..dcfd560d7d --- /dev/null +++ b/commons/zenoh-shm/tests/periodic_task.rs @@ -0,0 +1,172 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; + +use zenoh_shm::watchdog::periodic_task::PeriodicTask; + +pub mod common; +use common::CpuLoad; + +const TASK_PERIOD: Duration = Duration::from_millis(50); +const TASK_DELTA: Duration = Duration::from_millis(5); +const TEST_TASK: Duration = Duration::from_millis(10); + +fn intensive_payload(duration: Duration) -> impl Fn() + Send + 'static { + move || { + let start = Instant::now(); + while start.elapsed() < duration { + for _i in 0..100 {} + } + } +} + +fn blocking_payload(duration: Duration) -> impl Fn() + Send + 'static { + move || { + std::thread::sleep(duration); + } +} + +fn check_duration(duration: &Duration) { + let min = TASK_PERIOD - TASK_DELTA; + let max = TASK_PERIOD + TASK_DELTA; + + assert!(min <= *duration && *duration <= max); +} + +fn make_task(task_payload: F) -> (PeriodicTask, Arc>>) +where + F: Fn() + Send + 'static, +{ + let intervals = Arc::new(Mutex::new(vec![])); + + let c_intervals = intervals.clone(); + let mut start: Option = None; + let task = PeriodicTask::new("test".to_owned(), TASK_PERIOD, move || { + if let Some(val) = &start { + let elapsed = val.elapsed(); + c_intervals.lock().unwrap().push(elapsed); + } + start = Some(Instant::now()); + task_payload(); + }); + + (task, intervals) +} + +#[test] +#[ignore] +fn periodic_task_create() { + let (_task, _intervals) = make_task(|| {}); +} + +fn check_task(task_payload: F) 
+where + F: Fn() + Send + 'static, +{ + let n = 100; + let (task, intervals) = make_task(task_payload); + + std::thread::sleep(TASK_PERIOD * n); + drop(task); + + let guard = intervals.lock().unwrap(); + for duration in &*guard { + check_duration(duration); + } +} + +#[test] +#[ignore] +fn periodic_task_lightweight() { + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_blocking() { + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_intensive() { + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_low_load_lightweight() { + let _load = CpuLoad::low(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_low_load_blocking() { + let _load = CpuLoad::low(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_low_load_intensive() { + let _load = CpuLoad::low(); + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_lightweight() { + let _load = CpuLoad::optimal_high(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_blocking() { + let _load = CpuLoad::optimal_high(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_intensive() { + let _load = CpuLoad::optimal_high(); + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_exessive_load_lightweight() { + let _load = CpuLoad::exessive(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_exessive_load_blocking() { + let _load = CpuLoad::exessive(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_exessive_load_intensive() { + let _load = CpuLoad::exessive(); + check_task(intensive_payload(TEST_TASK)); +} diff --git a/commons/zenoh-shm/tests/posix_array.rs b/commons/zenoh-shm/tests/posix_array.rs new file mode 100644 index 0000000000..562102ea17 --- /dev/null +++ 
b/commons/zenoh-shm/tests/posix_array.rs @@ -0,0 +1,161 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Debug, mem::size_of}; + +use num_traits::{AsPrimitive, PrimInt, Unsigned}; +use zenoh_shm::posix_shm::array::ArrayInSHM; + +pub mod common; +use common::TEST_SEGMENT_PREFIX; + +type TestSegmentID = u32; + +#[derive(Debug)] +#[stabby::stabby] +struct TestElem { + value: u32, +} + +impl TestElem { + fn fill(&mut self, counter: &mut u32) { + self.value = *counter; + *counter += 1; + } + + fn validate(&self, counter: &mut u32) { + assert_eq!(self.value, *counter); + *counter += 1; + } +} + +fn validate_array( + array1: &mut ArrayInSHM, + array2: &ArrayInSHM, + expected_elem_count: usize, +) where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, + isize: AsPrimitive, + usize: AsPrimitive, +{ + assert!(array1.elem_count() == expected_elem_count); + assert!(array2.elem_count() == expected_elem_count); + + let mut fill_ctr = 0; + let mut validate_ctr = 0; + + // first of all, fill and validate elements sequentially + for i in 0..array1.elem_count() { + unsafe { + let elem1 = &mut *array1.elem_mut(i.as_()); + let elem2 = &*array2.elem(i.as_()); + + elem1.fill(&mut fill_ctr); + elem2.validate(&mut validate_ctr); + } + } + + // then fill all the elements... 
+ for i in 0..array1.elem_count() { + unsafe { + let elem1 = &mut *array1.elem_mut(i.as_()); + elem1.fill(&mut fill_ctr); + } + } + + // ...and validate all the elements + for i in 0..array2.elem_count() { + unsafe { + let elem2 = &*array2.elem(i.as_()); + elem2.validate(&mut validate_ctr); + } + } +} + +fn test_array() +where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, + isize: AsPrimitive, + usize: AsPrimitive, +{ + // Estimate elem count to test + // NOTE: for index sizes <= 16 bit we use the whole index range to test, + // and for bigger indexes we use limited index range + let elem_count = { + match size_of::() > size_of::() { + true => 100, + false => ElemIndex::max_value().as_() + 1, + } + }; + + let mut new_arr: ArrayInSHM = + ArrayInSHM::create(elem_count, TEST_SEGMENT_PREFIX).expect("error creating new array!"); + + let opened_arr: ArrayInSHM<_, TestElem, ElemIndex> = + ArrayInSHM::open(new_arr.id(), TEST_SEGMENT_PREFIX).expect("error opening existing array!"); + + validate_array(&mut new_arr, &opened_arr, elem_count); +} + +/// MEMORY CHECKS /// + +#[test] +fn arr_u8_index_memory_test() { + test_array::(); +} + +#[test] +fn arr_u16_index_memory_test() { + test_array::(); +} + +#[test] +fn arr_u32_index_memory_test() { + test_array::(); +} + +/// ELEM COUNT CHECKS /// + +fn test_invalid_elem_index() +where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive + Debug, + isize: AsPrimitive, + usize: AsPrimitive, +{ + let invalid_elem_count = ElemIndex::max_value().as_() + 2; + + let _ = ArrayInSHM::::create( + invalid_elem_count, + TEST_SEGMENT_PREFIX, + ) + .expect_err( + format!("must fail: element count {invalid_elem_count} is out of range for ElemIndex!") + .as_str(), + ); +} + +#[test] +fn arr_u8_index_invalid_elem_count() { + test_invalid_elem_index::(); +} + +#[test] +fn arr_u16_index_invalid_elem_count() { + test_invalid_elem_index::(); +} + +#[test] +fn arr_u32_index_invalid_elem_count() { + test_invalid_elem_index::(); +} diff 
--git a/commons/zenoh-shm/tests/posix_segment.rs b/commons/zenoh-shm/tests/posix_segment.rs new file mode 100644 index 0000000000..907f70cc4e --- /dev/null +++ b/commons/zenoh-shm/tests/posix_segment.rs @@ -0,0 +1,136 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Display, slice}; + +use zenoh_shm::posix_shm::segment::Segment; + +pub mod common; +use common::{validate_memory, TEST_SEGMENT_PREFIX}; + +fn validate_segment(segment1: &Segment, segment2: &Segment) +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + assert!(segment1.len() == segment2.len()); + + let ptr1 = segment1.as_ptr(); + let ptr2 = segment2.as_ptr(); + + let slice1 = unsafe { slice::from_raw_parts_mut(ptr1, segment1.len()) }; + let slice2 = unsafe { slice::from_raw_parts(ptr2, segment2.len()) }; + + validate_memory(slice1, slice2); +} + +fn test_segment() +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Copy + Clone + Display, +{ + let new_segment: Segment = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + + let opened_segment_instance_1 = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); + + validate_segment(&new_segment, &opened_segment_instance_1); + + let opened_segment_instance_2 = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); + + validate_segment(&new_segment, &opened_segment_instance_1); + validate_segment(&new_segment, &opened_segment_instance_2); + + 
drop(opened_segment_instance_1); + validate_segment(&new_segment, &opened_segment_instance_2); +} + +/// UNSIGNED /// + +#[test] +fn segment_u8_id() { + test_segment::() +} + +#[test] +fn segment_u16_id() { + test_segment::() +} + +#[test] +fn segment_u32_id() { + test_segment::() +} + +#[test] +fn segment_u64_id() { + test_segment::() +} + +#[test] +fn segment_u128_id() { + test_segment::() +} + +/// SIGNED /// + +#[test] +fn segment_i8_id() { + test_segment::() +} + +#[test] +fn segment_i16_id() { + test_segment::() +} + +#[test] +fn segment_i32_id() { + test_segment::() +} + +#[test] +fn segment_i64_id() { + test_segment::() +} + +#[test] +fn segment_i128_id() { + test_segment::() +} + +/// Behaviour checks /// + +#[test] +fn segment_open() { + let new_segment: Segment = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + + let _opened_segment = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); +} + +#[test] +fn segment_open_error() { + let id = { + let new_segment: Segment = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + new_segment.id() + }; + + let _opened_segment = Segment::open(id, TEST_SEGMENT_PREFIX) + .expect_err("must fail: opened not existing segment!"); +} diff --git a/commons/zenoh-shm/tests/posix_shm_provider.rs b/commons/zenoh-shm/tests/posix_shm_provider.rs new file mode 100644 index 0000000000..4c27879623 --- /dev/null +++ b/commons/zenoh-shm/tests/posix_shm_provider.rs @@ -0,0 +1,117 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_shm::api::{ + client::shared_memory_client::SharedMemoryClient, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + }, + provider::{ + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, MemoryLayout}, + }, +}; + +static BUFFER_NUM: usize = 100; +static BUFFER_SIZE: usize = 1024; + +#[test] +fn posix_shm_provider_create() { + let _backend = PosixSharedMemoryProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); +} + +#[test] +fn posix_shm_provider_alloc() { + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); + + let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); + + let _buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); +} + +#[test] +fn posix_shm_provider_open() { + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); + + let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); + + let buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + + let client = PosixSharedMemoryClient {}; + + let _segment = client + .attach(buf.descriptor.segment) + .expect("Error attaching to segment"); +} + +#[test] +fn posix_shm_provider_allocator() { + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(BUFFER_SIZE * BUFFER_NUM) + .expect("Error creating Layout!") + .res() + .expect("Error creating 
PosixSharedMemoryProviderBackend!"); + + let layout = MemoryLayout::new(BUFFER_SIZE, AllocAlignment::default()).unwrap(); + + // exhaust memory by allocating it all + let mut buffers = vec![]; + for _ in 0..BUFFER_NUM { + let buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + buffers.push(buf); + } + + for _ in 0..BUFFER_NUM { + // there is nothing to allocate at this point + assert_eq!(backend.available(), 0); + assert!(backend.alloc(&layout).is_err()); + + // free buffer + let to_free = buffers.pop().unwrap().descriptor; + backend.free(&to_free); + + // allocate new one + let buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + buffers.push(buf); + } + + // free everything + while let Some(buffer) = buffers.pop() { + backend.free(&buffer.descriptor); + } + + // confirm that allocator is free + assert_eq!(backend.available(), BUFFER_NUM * BUFFER_SIZE); +} diff --git a/commons/zenoh-shm/tests/watchdog.rs b/commons/zenoh-shm/tests/watchdog.rs new file mode 100644 index 0000000000..fe1ccd2ab2 --- /dev/null +++ b/commons/zenoh-shm/tests/watchdog.rs @@ -0,0 +1,311 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{atomic::AtomicBool, Arc}, + time::Duration, +}; + +use zenoh_result::{bail, ZResult}; +use zenoh_shm::watchdog::{ + confirmator::GLOBAL_CONFIRMATOR, storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR, +}; + +pub mod common; +use common::{execute_concurrent, CpuLoad}; + +const VALIDATION_PERIOD: Duration = Duration::from_millis(100); +const CONFIRMATION_PERIOD: Duration = Duration::from_millis(50); + +fn watchdog_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let _allocated = GLOBAL_STORAGE.allocate_watchdog()?; + Ok(()) + } +} + +#[test] +fn watchdog_alloc() { + execute_concurrent(1, 10000, watchdog_alloc_fn()); +} + +#[test] +fn watchdog_alloc_concurrent() { + execute_concurrent(1000, 10000, watchdog_alloc_fn()); +} + +fn watchdog_confirmed_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE.allocate_watchdog()?; + let confirmed = GLOBAL_CONFIRMATOR.add_owned(&allocated.descriptor)?; + + // check that the confirmed watchdog stays valid + for i in 0..10 { + std::thread::sleep(VALIDATION_PERIOD); + let valid = confirmed.owned.test_validate() != 0; + if !valid { + bail!("Invalid watchdog, iteration {i}"); + } + } + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_confirmed() { + execute_concurrent(1, 10, watchdog_confirmed_fn()); +} + +#[test] +#[ignore] +fn watchdog_confirmed_concurrent() { + execute_concurrent(1000, 10, watchdog_confirmed_fn()); +} + +// TODO: confirmation to dangling watchdog actually writes to potentially-existing +// other watchdog instance from other test running in the same process and changes it's behaviour, +// so we cannot run dangling test in parallel with anything else +#[test] +#[ignore] +fn 
watchdog_confirmed_dangling() { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + let confirmed = GLOBAL_CONFIRMATOR + .add_owned(&allocated.descriptor) + .expect("error adding watchdog to confirmator!"); + drop(allocated); + + // confirm dangling (not allocated) watchdog + for _ in 0..10 { + std::thread::sleep(VALIDATION_PERIOD); + confirmed.owned.confirm(); + } +} + +fn watchdog_validated_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE.allocate_watchdog()?; + let confirmed = GLOBAL_CONFIRMATOR.add_owned(&allocated.descriptor)?; + + let valid = Arc::new(AtomicBool::new(true)); + { + let c_valid = valid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + c_valid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + // check that the watchdog stays valid as it is confirmed + for i in 0..10 { + std::thread::sleep(VALIDATION_PERIOD); + if !valid.load(std::sync::atomic::Ordering::SeqCst) { + bail!("Invalid watchdog, iteration {i}"); + } + } + + // Worst-case timings: + // validation: |___________|___________|___________|___________| + // confirmation: __|_____|_____|_____|_____| + // drop(confirmed): ^ + // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2 + + // check that the watchdog becomes invalid once we stop it's confirmation + drop(confirmed); + std::thread::sleep(VALIDATION_PERIOD * 3 + CONFIRMATION_PERIOD); + assert!(!valid.load(std::sync::atomic::Ordering::SeqCst)); + + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated() { + execute_concurrent(1, 10, watchdog_validated_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_concurrent() { + execute_concurrent(1000, 10, watchdog_validated_fn()); +} + +fn watchdog_validated_invalid_without_confirmator_fn( +) -> impl Fn(usize, usize) -> 
ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + + let valid = Arc::new(AtomicBool::new(true)); + { + let c_valid = valid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + c_valid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + assert!(allocated.descriptor.test_validate() == 0); + + // check that the watchdog becomes invalid because we do not confirm it + std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD); + assert!(!valid.load(std::sync::atomic::Ordering::SeqCst)); + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated_invalid_without_confirmator() { + execute_concurrent(1, 10, watchdog_validated_invalid_without_confirmator_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_invalid_without_confirmator_concurrent() { + execute_concurrent( + 1000, + 10, + watchdog_validated_invalid_without_confirmator_fn(), + ); +} + +fn watchdog_validated_additional_confirmation_fn( +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + let confirmed = GLOBAL_CONFIRMATOR + .add_owned(&allocated.descriptor) + .expect("error adding watchdog to confirmator!"); + + let allow_invalid = Arc::new(AtomicBool::new(false)); + { + let c_allow_invalid = allow_invalid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + c_allow_invalid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + // make additional confirmations + for _ in 0..100 { + std::thread::sleep(VALIDATION_PERIOD / 10); + confirmed.owned.confirm(); + } + + // check that the watchdog stays valid as we 
stop additional confirmation + std::thread::sleep(VALIDATION_PERIOD * 10); + + // Worst-case timings: + // validation: |___________|___________|___________|___________| + // confirmation: __|_____|_____|_____|_____| + // drop(confirmed): ^ + // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2 + + // check that the watchdog becomes invalid once we stop it's regular confirmation + drop(confirmed); + allow_invalid.store(true, std::sync::atomic::Ordering::SeqCst); + std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD); + // check that invalidation event happened! + assert!(!allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated_additional_confirmation() { + execute_concurrent(1, 10, watchdog_validated_additional_confirmation_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_additional_confirmation_concurrent() { + execute_concurrent(1000, 10, watchdog_validated_additional_confirmation_fn()); +} + +fn watchdog_validated_overloaded_system_fn( +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + let confirmed = GLOBAL_CONFIRMATOR + .add_owned(&allocated.descriptor) + .expect("error adding watchdog to confirmator!"); + + let allow_invalid = Arc::new(AtomicBool::new(false)); + { + let c_allow_invalid = allow_invalid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + c_allow_invalid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + // check that the watchdog stays valid + std::thread::sleep(VALIDATION_PERIOD * 10); + + // Worst-case timings: + // validation: |___________|___________|___________|___________| + // confirmation: 
__|_____|_____|_____|_____| + // drop(confirmed): ^ + // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2 + + // check that the watchdog becomes invalid once we stop it's regular confirmation + drop(confirmed); + allow_invalid.store(true, std::sync::atomic::Ordering::SeqCst); + std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD); + // check that invalidation event happened! + assert!(!allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated_low_load() { + let _load = CpuLoad::low(); + execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_high_load() { + let _load = CpuLoad::optimal_high(); + execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_overloaded_system() { + let _load = CpuLoad::exessive(); + execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); +} diff --git a/commons/zenoh-sync/src/object_pool.rs b/commons/zenoh-sync/src/object_pool.rs index 83b673c449..3386b2058b 100644 --- a/commons/zenoh-sync/src/object_pool.rs +++ b/commons/zenoh-sync/src/object_pool.rs @@ -141,10 +141,12 @@ impl ZSliceBuffer for RecyclingObject> { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 2027133a1e..e117507ae9 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -27,19 +27,10 @@ readme = "README.md" publish = false [features] -shared-memory = ["zenoh-shm", "zenoh/shared-memory"] +shared-memory = ["zenoh/shared-memory"] unstable = ["zenoh/unstable"] transport_unixpipe = ["zenoh/transport_unixpipe"] -# Unfortunately, the feature "transport_unixpipe" is always -# enabled for the 
lines below. It looks like a Cargo bug :( -# -# [target.'cfg(unix)'.dependencies] -# zenoh = { workspace = true, features = ["transport_unixpipe"] } -# -# [target.'cfg(not(unix))'.dependencies] -# zenoh = { workspace = true } - [dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "time", "io-std"] } clap = { workspace = true, features = ["derive"] } @@ -52,7 +43,6 @@ zenoh-collections = { workspace = true } tracing = { workspace = true } zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } -zenoh-shm = { workspace = true, optional = true } [dev-dependencies] rand = { workspace = true, features = ["default"] } @@ -91,7 +81,7 @@ path = "examples/z_pub.rs" [[example]] name = "z_pub_shm" path = "examples/z_pub_shm.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_sub" @@ -100,7 +90,7 @@ path = "examples/z_sub.rs" [[example]] name = "z_sub_shm" path = "examples/z_sub_shm.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_pull" @@ -148,12 +138,22 @@ path = "examples/z_sub_thr.rs" [[example]] name = "z_pub_shm_thr" path = "examples/z_pub_shm_thr.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_ping" path = "examples/z_ping.rs" +[[example]] +name = "z_ping_shm" +path = "examples/z_ping_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_pong" path = "examples/z_pong.rs" + +[[example]] +name = "z_alloc_shm" +path = "examples/z_alloc_shm.rs" +required-features = ["unstable", "shared-memory"] \ No newline at end of file diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs new file mode 100644 index 0000000000..a6afb1190c --- /dev/null +++ b/examples/examples/z_alloc_shm.rs @@ -0,0 +1,136 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the 
accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::r#async::*; +use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; +use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; +use zenoh::shm::provider::shared_memory_provider::{ + BlockOn, GarbageCollect, SharedMemoryProviderBuilder, +}; +use zenoh::shm::provider::shared_memory_provider::{Deallocate, Defragment}; +use zenoh::shm::provider::types::{AllocAlignment, MemoryLayout}; +use zenoh::Result; + +#[tokio::main] +async fn main() { + // Initiate logging + zenoh_util::try_init_log_from_env(); + run().await.unwrap() +} + +async fn run() -> Result<()> { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
This code is executed at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(65536, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Create a layout for particular allocation arguments and particular SHM provider + // The layout is validated for argument correctness and also is checked + // against particular SHM provider's layouting capabilities. + // This layout is reusable and can handle series of similar allocations + let buffer_layout = { + // OPTION 1: Simple (default) configuration: + let simple_layout = shared_memory_provider + .alloc_layout() + .size(512) + .res() + .unwrap(); + + // OPTION 2: Comprehensive configuration: + let _comprehensive_layout = shared_memory_provider + .alloc_layout() + .size(512) + .alignment(AllocAlignment::new(2)) + .res() + .unwrap(); + + simple_layout + }; + + // Allocate SharedMemoryBuf + // Policy is a generics-based API to describe necessary allocation behaviour + // that will be highly optimized at compile-time. + // Policy resolvable can be sync and async.
+ // The basic policies are: + // -JustAlloc (sync) + // -GarbageCollect (sync) + // -Deallocate (sync) + // --contains own set of dealloc policy generics: + // ---DeallocateYoungest + // ---DeallocateEldest + // ---DeallocateOptimal + // -BlockOn (sync and async) + let mut sbuf = async { + // Some examples on how to use layout's interface: + + // The default allocation with default JustAlloc policy + let default_alloc = buffer_layout.alloc().res().unwrap(); + + // The async allocation + let _async_alloc = buffer_layout + .alloc() + .with_policy::() + .res_async() + .await + .unwrap(); + + // The comprehensive allocation policy that blocks if provider is not able to allocate + let _comprehensive_alloc = buffer_layout + .alloc() + .with_policy::>>() + .res() + .unwrap(); + + // The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate + let _comprehensive_alloc = buffer_layout + .alloc() + .with_policy::>>() + .res() + .unwrap(); + + default_alloc + } + .await; + + // Fill recently-allocated buffer with data + sbuf[0..8].fill(0); + + // Declare Session and Publisher (common code) + let session = zenoh::open(Config::default()).res_async().await?; + let publisher = session.declare_publisher("my/key/expr").res_async().await?; + + // Publish SHM buffer + publisher.put(sbuf).res_async().await +} diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs new file mode 100644 index 0000000000..08c08276d4 --- /dev/null +++ b/examples/examples/z_ping_shm.rs @@ -0,0 +1,147 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use std::time::{Duration, Instant}; +use zenoh::buffers::ZSlice; +use zenoh::config::Config; +use zenoh::prelude::sync::*; +use zenoh::publication::CongestionControl; +use zenoh::shm::protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, +}; +use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; +use zenoh::shm::provider::types::AllocAlignment; +use zenoh::shm::provider::types::MemoryLayout; +use zenoh_examples::CommonArgs; + +fn main() { + // Initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, warmup, size, n) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_ping_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + let session = zenoh::open(config).res().unwrap(); + + // The key expression to publish data on + let key_expr_ping = keyexpr::new("test/ping").unwrap(); + + // The key expression to wait the response back + let key_expr_pong = keyexpr::new("test/pong").unwrap(); + + let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let publisher = session + .declare_publisher(key_expr_ping) + .congestion_control(CongestionControl::Block) + .res() + .unwrap(); + + let mut samples = Vec::with_capacity(n); + + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + let buf = shared_memory_provider + .alloc_layout() + .size(size) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); + + // convert ZSliceShmMut into ZSlice as ZSliceShmMut does not support Clone + let buf: ZSlice = buf.into(); + + // -- warmup -- + println!("Warming up for {warmup:?}..."); + let now = Instant::now(); + while now.elapsed() < warmup { + publisher.put(buf.clone()).res().unwrap(); + let _ = sub.recv().unwrap(); + } + + for _ in 0..n { + let buf = buf.clone(); + let write_time = Instant::now(); + publisher.put(buf).res().unwrap(); + + let _ = sub.recv(); + let ts = write_time.elapsed().as_micros(); + samples.push(ts); + } + + for (i, rtt) in samples.iter().enumerate().take(n) { + println!( + "{} bytes: seq={} rtt={:?}µs lat={:?}µs", + size, + i, + rtt, + rtt / 2 + ); + } +} + +#[derive(Parser)] +struct Args { + #[arg(short, long, default_value = "1")] + /// The number of seconds to warm up (float) + warmup: f64, + #[arg(short = 'n', long, default_value = "100")] + /// The number of round-trips to measure + samples: usize, + /// Sets the size of the payload to publish + payload_size: usize, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, Duration, usize, usize) { + 
let args = Args::parse(); + ( + args.common.into(), + Duration::from_secs_f64(args.warmup), + args.payload_size, + args.samples, + ) +} diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index aa691f0425..4c2e41ab18 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -21,7 +21,12 @@ fn main() { // initiate logging zenoh_util::try_init_log_from_env(); - let (config, express) = parse_args(); + let (mut config, express) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_ping_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); let session = zenoh::open(config).res().unwrap().into_arc(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 542cff3b6d..8287509f1b 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,10 +12,16 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::shm::SharedMemoryManager; +use zenoh::shm::protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, +}; +use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; +use zenoh::shm::provider::shared_memory_provider::{BlockOn, GarbageCollect}; +use zenoh::shm::provider::types::AllocAlignment; +use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; const N: usize = 10; @@ -36,72 +42,71 @@ async fn main() -> Result<(), zenoh::Error> { println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); - println!("Creating Shared Memory Manager..."); - 
let id = session.zid(); - let mut shm = SharedMemoryManager::make(id.to_string(), N * 1024).unwrap(); + println!("Creating POSIX SHM backend..."); + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. This code is executed at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + println!("Creating SHM Provider with POSIX backend..."); + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); - println!("Allocating Shared Memory Buffer..."); let publisher = session.declare_publisher(&path).res().await.unwrap(); + println!("Allocating Shared Memory Buffer..."); + let layout = shared_memory_provider + .alloc_layout() + .size(1024) + .res() + .unwrap(); + println!("Press CTRL-C to quit..."); for idx in 0..(K * N as u32) { - tokio::time::sleep(Duration::from_secs(1)).await; - let mut sbuf = match shm.alloc(1024) { - Ok(buf) => buf, - Err(_) => { - tokio::time::sleep(Duration::from_millis(100)).await; - println!( - "Afer failing allocation the GC collected: {} bytes -- retrying", - shm.garbage_collect() - ); - println!( - "Trying to de-fragment memory... 
De-fragmented {} bytes", - shm.defragment() - ); - shm.alloc(1024).unwrap() - } - }; + let mut sbuf = layout + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); // We reserve a small space at the beginning of the buffer to include the iteration index // of the write. This is simply to have the same format as zn_pub. let prefix = format!("[{idx:4}] "); let prefix_len = prefix.as_bytes().len(); - - // Retrive a mutable slice from the SharedMemoryBuf. - // - // This operation is marked unsafe since we cannot guarantee a single mutable reference - // across multiple processes. Thus if you use it, and you'll inevitable have to use it, - // you have to keep in mind that if you have multiple process retrieving a mutable slice - // you may get into concurrent writes. That said, if you have a serial pipeline and - // the buffer is flowing through the pipeline this will not create any issues. - // - // In short, whilst this operation is marked as unsafe, you are safe if you can - // guarantee that in your application only one process at the time will actually write. - let slice = unsafe { sbuf.as_mut_slice() }; let slice_len = prefix_len + value.as_bytes().len(); - slice[0..prefix_len].copy_from_slice(prefix.as_bytes()); - slice[prefix_len..slice_len].copy_from_slice(value.as_bytes()); + + sbuf[0..prefix_len].copy_from_slice(prefix.as_bytes()); + sbuf[prefix_len..slice_len].copy_from_slice(value.as_bytes()); // Write the data println!( "Put SHM Data ('{}': '{}')", path, - String::from_utf8_lossy(&slice[0..slice_len]) + String::from_utf8_lossy(&sbuf[0..slice_len]) ); - publisher.put(sbuf.clone()).res().await?; - if idx % K == 0 { - let freed = shm.garbage_collect(); - println!("The Gargabe collector freed {freed} bytes"); - let defrag = shm.defragment(); - println!("De-framented {defrag} bytes"); - } - // Dropping the SharedMemoryBuf means to free it. 
- drop(sbuf); + publisher.put(sbuf).res().await?; } - // Signal the SharedMemoryManager to garbage collect all the freed SharedMemoryBuf. - let _freed = shm.garbage_collect(); - Ok(()) } diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 437f6b2d6d..90c1707765 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,10 +12,17 @@ // ZettaScale Zenoh Team, // use clap::Parser; +use zenoh::buffers::ZSlice; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::shm::SharedMemoryManager; +use zenoh::shm::protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, +}; +use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; +use zenoh::shm::provider::types::AllocAlignment; +use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; #[tokio::main] @@ -30,11 +37,44 @@ async fn main() { config.transport.shared_memory.set_enabled(true).unwrap(); let z = zenoh::open(config).res().await.unwrap(); - let id = z.zid(); - let mut shm = SharedMemoryManager::make(id.to_string(), sm_size).unwrap(); - let mut buf = shm.alloc(size).unwrap(); - let bs = unsafe { buf.as_mut_slice() }; - for b in bs { + + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
This code is executed at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(sm_size, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + let mut buf = shared_memory_provider + .alloc_layout() + .size(size) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); + + for b in buf.as_mut() { *b = rand::random::(); } @@ -42,6 +82,8 @@ async fn main() { // Make sure to not drop messages because of congestion control .congestion_control(CongestionControl::Block).res().await.unwrap(); + let buf: ZSlice = buf.into(); + println!("Press CTRL-C to quit..."); loop { publisher.put(buf.clone()).res().await.unwrap(); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index d304d6a7f6..aa3967becd 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -14,8 +14,8 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::shm::slice::zsliceshm::zsliceshm; use zenoh_examples::CommonArgs; -use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { @@ -37,12 +37,12 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::() { + match sample.payload().deserialize::<&zsliceshm>() { Ok(payload) => println!( ">> [Subscriber] Received {} ('{}': '{:02x?}')", sample.kind(), 
sample.key_expr().as_str(), - payload.as_slice() + payload ), Err(e) => { println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index b3a299e8be..7efaabb719 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -29,6 +29,7 @@ shared-memory = [ "zenoh-protocol/shared-memory", "zenoh-shm", "zenoh-codec/shared-memory", + "zenoh-buffers/shared-memory", ] auth_pubkey = ["transport_auth", "rsa"] auth_usrpwd = ["transport_auth"] diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index e923a7e1af..8048d9ff49 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -423,7 +423,7 @@ impl RBatch { pub fn initialize(&mut self, #[allow(unused_variables)] buff: C) -> ZResult<()> where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { #[allow(unused_variables)] let (l, h, p) = Self::split(self.buffer.as_slice(), &self.config); @@ -455,10 +455,10 @@ impl RBatch { #[cfg(feature = "transport_compression")] fn decompress(&self, payload: &[u8], mut buff: impl FnMut() -> T) -> ZResult where - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { let mut into = (buff)(); - let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) + let n = lz4_flex::block::decompress_into(payload, into.as_mut()) .map_err(|_| zerror!("Decompression error"))?; let zslice = ZSlice::new(Arc::new(into), 0, n) .map_err(|_| zerror!("Invalid decompression buffer length"))?; diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 73a38545db..ddf1fe23c1 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -33,6 +33,10 @@ use zenoh_protocol::{ VERSION, }; use zenoh_result::{bail, ZResult}; +#[cfg(feature = "shared-memory")] +use 
zenoh_shm::api::client_storage::GLOBAL_CLIENT_STORAGE; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::SharedMemoryReader; use zenoh_task::TaskController; /// # Examples @@ -133,9 +137,17 @@ pub struct TransportManagerBuilder { endpoints: HashMap, // (protocol, config) tx_threads: usize, protocols: Option>, + #[cfg(feature = "shared-memory")] + shm_reader: Option, } impl TransportManagerBuilder { + #[cfg(feature = "shared-memory")] + pub fn shm_reader(mut self, shm_reader: Option) -> Self { + self.shm_reader = shm_reader; + self + } + pub fn zid(mut self, zid: ZenohId) -> Self { self.zid = zid; self @@ -251,7 +263,16 @@ impl TransportManagerBuilder { // Initialize the PRNG and the Cipher let mut prng = PseudoRng::from_entropy(); - let unicast = self.unicast.build(&mut prng)?; + #[cfg(feature = "shared-memory")] + let shm_reader = self + .shm_reader + .unwrap_or_else(|| SharedMemoryReader::new(GLOBAL_CLIENT_STORAGE.clone())); + + let unicast = self.unicast.build( + &mut prng, + #[cfg(feature = "shared-memory")] + &shm_reader, + )?; let multicast = self.multicast.build()?; let mut queue_size = [0; Priority::NUM]; @@ -295,7 +316,12 @@ impl TransportManagerBuilder { let params = TransportManagerParams { config, state }; - Ok(TransportManager::new(params, prng)) + Ok(TransportManager::new( + params, + prng, + #[cfg(feature = "shared-memory")] + shm_reader, + )) } } @@ -321,6 +347,8 @@ impl Default for TransportManagerBuilder { multicast: TransportManagerBuilderMulticast::default(), tx_threads: 1, protocols: None, + #[cfg(feature = "shared-memory")] + shm_reader: None, } } } @@ -333,13 +361,19 @@ pub struct TransportManager { pub(crate) cipher: Arc, pub(crate) locator_inspector: zenoh_link::LocatorInspector, pub(crate) new_unicast_link_sender: NewLinkChannelSender, + #[cfg(feature = "shared-memory")] + pub(crate) shmr: SharedMemoryReader, #[cfg(feature = "stats")] pub(crate) stats: Arc, pub(crate) task_controller: TaskController, } impl TransportManager { - 
pub fn new(params: TransportManagerParams, mut prng: PseudoRng) -> TransportManager { + pub fn new( + params: TransportManagerParams, + mut prng: PseudoRng, + #[cfg(feature = "shared-memory")] shmr: SharedMemoryReader, + ) -> TransportManager { // Initialize the Cipher let mut key = [0_u8; BlockCipher::BLOCK_SIZE]; prng.fill_bytes(&mut key); @@ -357,6 +391,8 @@ impl TransportManager { new_unicast_link_sender, #[cfg(feature = "stats")] stats: std::sync::Arc::new(crate::stats::TransportStats::default()), + #[cfg(feature = "shared-memory")] + shmr, task_controller: TaskController::default(), }; diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 30a166b273..883f978684 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -205,12 +205,12 @@ impl TransportLinkMulticastRx { pub async fn recv_batch(&self, buff: C) -> ZResult<(RBatch, Locator)> where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { const ERR: &str = "Read error from link: "; let mut into = (buff)(); - let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; + let (n, locator) = self.inner.link.read(into.as_mut()).await?; let buffer = ZSlice::new(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; @@ -539,7 +539,7 @@ async fn rx_task( where T: ZSliceBuffer + 'static, F: Fn() -> T, - RecyclingObject: ZSliceBuffer, + RecyclingObject: AsMut<[u8]> + ZSliceBuffer, { let (rbatch, locator) = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index c2f0642579..ebc51a2ec6 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -11,8 +11,6 @@ // Contributors: // 
ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use crate::multicast::shm::SharedMemoryMulticast; use crate::multicast::{transport::TransportMulticastInner, TransportMulticast}; use crate::TransportManager; use std::collections::HashMap; @@ -61,9 +59,6 @@ pub struct TransportManagerStateMulticast { pub(crate) protocols: Arc>>, // Established transports pub(crate) transports: Arc>>>, - // Shared memory - #[cfg(feature = "shared-memory")] - pub(super) shm: Arc, } pub struct TransportManagerParamsMulticast { @@ -143,8 +138,6 @@ impl TransportManagerBuilderMulticast { let state = TransportManagerStateMulticast { protocols: Arc::new(Mutex::new(HashMap::new())), transports: Arc::new(Mutex::new(HashMap::new())), - #[cfg(feature = "shared-memory")] - shm: Arc::new(SharedMemoryMulticast::make()?), }; let params = TransportManagerParamsMulticast { config, state }; diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index daf9b069ff..e205125b39 100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -14,8 +14,6 @@ pub(crate) mod establishment; pub(crate) mod link; pub(crate) mod manager; pub(crate) mod rx; -#[cfg(feature = "shared-memory")] -pub(crate) mod shm; pub(crate) mod transport; pub(crate) mod tx; diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index c7b1d65e59..1576d65cd6 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -42,7 +42,7 @@ impl TransportMulticastInner { #[cfg(feature = "shared-memory")] { if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader)?; + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; } } diff --git a/io/zenoh-transport/src/multicast/shm.rs b/io/zenoh-transport/src/multicast/shm.rs deleted file mode 100644 index 060198d927..0000000000 --- 
a/io/zenoh-transport/src/multicast/shm.rs +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use rand::{Rng, SeedableRng}; -use tokio::sync::RwLock; -use zenoh_crypto::PseudoRng; -use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryManager, SharedMemoryReader}; - -pub(crate) type Challenge = u64; -const NAME: &str = "zshm_mcast"; - -pub(crate) struct SharedMemoryMulticast { - pub(crate) _manager: SharedMemoryManager, - pub(crate) reader: RwLock, -} - -unsafe impl Sync for SharedMemoryMulticast {} - -impl SharedMemoryMulticast { - pub fn make() -> ZResult { - let mut prng = PseudoRng::from_entropy(); - let nonce = prng.gen::(); - let size = std::mem::size_of::(); - - let mut _manager = SharedMemoryManager::make(format!("{NAME}.{nonce}"), size)?; - - let shmauth = SharedMemoryMulticast { - _manager, - reader: RwLock::new(SharedMemoryReader::new()), - }; - Ok(shmauth) - } -} diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index 155b6b5568..babf68ce61 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -13,6 +13,8 @@ // use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; +#[cfg(feature = "shared-memory")] +use crate::shm::MulticastTransportShmConfig; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -88,6 +90,8 @@ pub(crate) struct TransportMulticastInner { // Transport statistics 
#[cfg(feature = "stats")] pub(super) stats: Arc, + #[cfg(feature = "shared-memory")] + pub(super) shm: Option, } impl TransportMulticastInner { @@ -109,6 +113,12 @@ impl TransportMulticastInner { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); + #[cfg(feature = "shared-memory")] + let shm = match manager.config.multicast.is_shm { + true => Some(MulticastTransportShmConfig), + false => None, + }; + let ti = TransportMulticastInner { manager, priority_tx: priority_tx.into_boxed_slice().into(), @@ -119,6 +129,8 @@ impl TransportMulticastInner { task_controller: TaskController::default(), #[cfg(feature = "stats")] stats, + #[cfg(feature = "shared-memory")] + shm, }; let link = TransportLinkMulticastUniversal::new(ti.clone(), config.link); diff --git a/io/zenoh-transport/src/multicast/tx.rs b/io/zenoh-transport/src/multicast/tx.rs index 3b58277402..ee7715d38b 100644 --- a/io/zenoh-transport/src/multicast/tx.rs +++ b/io/zenoh-transport/src/multicast/tx.rs @@ -15,6 +15,9 @@ use super::transport::TransportMulticastInner; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + //noinspection ALL impl TransportMulticastInner { fn schedule_on_link(&self, msg: NetworkMessage) -> bool { @@ -53,12 +56,7 @@ impl TransportMulticastInner { pub(super) fn schedule(&self, mut msg: NetworkMessage) -> bool { #[cfg(feature = "shared-memory")] { - let res = if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.shm) { tracing::trace!("Failed SHM conversion: {}", e); return false; } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 09edde884e..6dd65aab16 100644 --- a/io/zenoh-transport/src/shm.rs +++ 
b/io/zenoh-transport/src/shm.rs @@ -11,70 +11,178 @@ // Contributors: // ZettaScale Zenoh Team, // -use tokio::sync::RwLock; +use std::collections::HashSet; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf, ZSlice, ZSliceKind}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_core::{zasyncread, zasyncwrite, zerror}; +use zenoh_core::zerror; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{ err::Err, ext::ShmType, query::{ext::QueryBodyType, Query}, - reply::ReplyBody, PushBody, Put, Reply, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryBuf, SharedMemoryBufInfo, SharedMemoryReader}; +use zenoh_shm::{ + api::common::types::ProtocolID, reader::SharedMemoryReader, SharedMemoryBuf, + SharedMemoryBufInfo, +}; + +use crate::unicast::establishment::ext::shm::AuthSegment; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TransportShmConfig { + partner_protocols: HashSet, +} + +impl PartnerShmConfig for TransportShmConfig { + fn supports_protocol(&self, protocol: ProtocolID) -> bool { + self.partner_protocols.contains(&protocol) + } +} -// Traits +impl TransportShmConfig { + pub fn new(partner_segment: AuthSegment) -> Self { + Self { + partner_protocols: partner_segment.protocols().iter().cloned().collect(), + } + } +} + +#[derive(Clone)] +pub struct MulticastTransportShmConfig; + +impl PartnerShmConfig for MulticastTransportShmConfig { + fn supports_protocol(&self, _protocol: ProtocolID) -> bool { + true + } +} + +pub fn map_zmsg_to_partner( + msg: &mut NetworkMessage, + partner_shm_cfg: &Option, +) -> ZResult<()> { + match &mut msg.body { + NetworkBody::Push(Push { payload, .. }) => match payload { + PushBody::Put(b) => b.map_to_partner(partner_shm_cfg), + PushBody::Del(_) => Ok(()), + }, + NetworkBody::Request(Request { payload, .. 
}) => match payload { + RequestBody::Query(b) => b.map_to_partner(partner_shm_cfg), + }, + NetworkBody::Response(Response { payload, .. }) => match payload { + ResponseBody::Reply(b) => b.map_to_partner(partner_shm_cfg), + ResponseBody::Err(b) => b.map_to_partner(partner_shm_cfg), + }, + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(()), + } +} + +pub fn map_zmsg_to_shmbuf(msg: &mut NetworkMessage, shmr: &SharedMemoryReader) -> ZResult<()> { + match &mut msg.body { + NetworkBody::Push(Push { payload, .. }) => match payload { + PushBody::Put(b) => b.map_to_shmbuf(shmr), + PushBody::Del(_) => Ok(()), + }, + NetworkBody::Request(Request { payload, .. }) => match payload { + RequestBody::Query(b) => b.map_to_shmbuf(shmr), + }, + NetworkBody::Response(Response { payload, .. }) => match payload { + ResponseBody::Err(b) => b.map_to_shmbuf(shmr), + ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + }, + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(()), + } +} + +pub trait PartnerShmConfig { + fn supports_protocol(&self, protocol: ProtocolID) -> bool; +} + +// Currently, there can be three forms of ZSlice: +// rawbuf - usual non-shm buffer +// shminfo - small SHM info that can be used to mount SHM buffer and get access to its contents +// shmbuf - mounted SHM buffer +// On RX and TX we need to do the following conversion: trait MapShm { - fn map_to_shminfo(&mut self) -> ZResult; - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult; + // RX: + // - shminfo -> shmbuf + // - rawbuf -> rawbuf (no changes) + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()>; + + // TX: + // - shmbuf -> shminfo if partner supports shmbuf's SHM protocol + // - shmbuf -> rawbuf if partner does not support shmbuf's SHM protocol + // - rawbuf -> rawbuf (no changes) + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> 
ZResult<()>; } -macro_rules! map_to_shminfo { - ($zbuf:expr, $ext_shm:expr) => {{ - let res = map_zbuf_to_shminfo($zbuf)?; - if res { - *$ext_shm = Some(ShmType::new()); +macro_rules! map_to_partner { + ($zbuf:expr, $ext_shm:expr, $partner_shm_cfg:expr) => {{ + match $partner_shm_cfg { + Some(shm_cfg) => { + let res = to_shm_partner($zbuf, shm_cfg)?; + if res { + *$ext_shm = Some(ShmType::new()); + } + } + None => { + to_non_shm_partner($zbuf); + } } - Ok(res) + + Ok(()) }}; } -macro_rules! map_to_shmbuf { +macro_rules! map_zbuf_to_shmbuf { ($zbuf:expr, $ext_shm:expr, $shmr:expr) => {{ if $ext_shm.is_some() { *$ext_shm = None; map_zbuf_to_shmbuf($zbuf, $shmr) } else { - Ok(false) + Ok(()) } }}; } // Impl - Put impl MapShm for Put { - fn map_to_shminfo(&mut self) -> ZResult { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; - map_to_shminfo!(payload, ext_shm) + map_to_partner!(payload, ext_shm, partner_shm_cfg) } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; - map_to_shmbuf!(payload, ext_shm, shmr) + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } } // Impl - Query impl MapShm for Query { - fn map_to_shminfo(&mut self) -> ZResult { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. @@ -82,13 +190,13 @@ impl MapShm for Query { .. } = self { - map_to_shminfo!(payload, ext_shm) + map_to_partner!(payload, ext_shm, partner_shm_cfg) } else { - Ok(false) + Ok(()) } } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. @@ -96,77 +204,75 @@ impl MapShm for Query { .. 
} = self { - map_to_shmbuf!(payload, ext_shm, shmr) + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } else { - Ok(false) + Ok(()) } } } // Impl - Reply impl MapShm for Reply { - fn map_to_shminfo(&mut self) -> ZResult { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { match &mut self.payload { - ReplyBody::Put(b) => b.map_to_shminfo(), - _ => Ok(false), + PushBody::Put(put) => { + let Put { + payload, ext_shm, .. + } = put; + map_to_partner!(payload, ext_shm, partner_shm_cfg) + } + PushBody::Del(_) => Ok(()), } } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { match &mut self.payload { - ReplyBody::Put(b) => b.map_to_shmbuf(shmr), - _ => Ok(false), + PushBody::Put(put) => { + let Put { + payload, ext_shm, .. + } = put; + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) + } + PushBody::Del(_) => Ok(()), } } } // Impl - Err impl MapShm for Err { - fn map_to_shminfo(&mut self) -> ZResult { - Ok(false) - } - - fn map_to_shmbuf(&mut self, _shmr: &RwLock) -> ZResult { - Ok(false) + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { + let Self { + payload, ext_shm, .. + } = self; + map_to_partner!(payload, ext_shm, partner_shm_cfg) } -} -// ShmBuf -> ShmInfo -pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { - match &mut msg.body { - NetworkBody::Push(Push { payload, .. }) => match payload { - PushBody::Put(b) => b.map_to_shminfo(), - PushBody::Del(_) => Ok(false), - }, - NetworkBody::Request(Request { payload, .. }) => match payload { - RequestBody::Query(b) => b.map_to_shminfo(), - }, - NetworkBody::Response(Response { payload, .. 
}) => match payload { - ResponseBody::Reply(b) => b.map_to_shminfo(), - ResponseBody::Err(b) => b.map_to_shminfo(), - }, - NetworkBody::ResponseFinal(_) - | NetworkBody::Interest(_) - | NetworkBody::Declare(_) - | NetworkBody::OAM(_) => Ok(false), + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { + let Self { + payload, ext_shm, .. + } = self; + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } } -// Mapping -pub fn map_zbuf_to_shminfo(zbuf: &mut ZBuf) -> ZResult { - let mut res = false; - for zs in zbuf.zslices_mut() { - if let Some(shmb) = zs.downcast_ref::() { - *zs = map_zslice_to_shminfo(shmb)?; - res = true; - } - } - Ok(res) +#[cold] +#[inline(never)] +pub fn shmbuf_to_rawbuf(shmb: &SharedMemoryBuf) -> ZSlice { + // Convert shmb to raw buffer + // TODO: optimize this! We should not make additional buffer copy here, + // but we need to make serializer serialize SHM buffer as raw buffer. + shmb.as_ref().to_vec().into() } #[cold] #[inline(never)] -pub fn map_zslice_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { +pub fn shmbuf_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { // Serialize the shmb info let codec = Zenoh080::new(); let mut info = vec![]; @@ -175,73 +281,63 @@ pub fn map_zslice_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { .write(&mut writer, &shmb.info) .map_err(|e| zerror!("{:?}", e))?; // Increase the reference count so to keep the SharedMemoryBuf valid - shmb.inc_ref_count(); + unsafe { shmb.inc_ref_count() }; // Replace the content of the slice let mut zslice: ZSlice = info.into(); zslice.kind = ZSliceKind::ShmPtr; Ok(zslice) } -// ShmInfo -> ShmBuf -pub fn map_zmsg_to_shmbuf( - msg: &mut NetworkMessage, - shmr: &RwLock, +fn to_shm_partner( + zbuf: &mut ZBuf, + partner_shm_cfg: &ShmCfg, ) -> ZResult { - match &mut msg.body { - NetworkBody::Push(Push { payload, .. }) => match payload { - PushBody::Put(b) => b.map_to_shmbuf(shmr), - PushBody::Del(_) => Ok(false), - }, - NetworkBody::Request(Request { payload, .. 
}) => match payload { - RequestBody::Query(b) => b.map_to_shmbuf(shmr), - }, - NetworkBody::Response(Response { payload, .. }) => match payload { - ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), - ResponseBody::Err(b) => b.map_to_shmbuf(shmr), - }, - NetworkBody::ResponseFinal(_) - | NetworkBody::Interest(_) - | NetworkBody::Declare(_) - | NetworkBody::OAM(_) => Ok(false), + let mut res = false; + for zs in zbuf.zslices_mut() { + if let Some(shmb) = zs.downcast_ref::() { + if partner_shm_cfg.supports_protocol(shmb.info.shm_protocol) { + *zs = shmbuf_to_shminfo(shmb)?; + res = true; + } else { + // Replace the content of the slice with rawbuf + *zs = shmbuf_to_rawbuf(shmb) + } + } } + Ok(res) } -// Mapping -pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &RwLock) -> ZResult { - let mut res = false; +fn to_non_shm_partner(zbuf: &mut ZBuf) { + for zs in zbuf.zslices_mut() { + if let Some(shmb) = zs.downcast_ref::() { + // Replace the content of the slice with rawbuf + *zs = shmbuf_to_rawbuf(shmb) + } + } +} + +pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &SharedMemoryReader) -> ZResult<()> { for zs in zbuf.zslices_mut().filter(|x| x.kind == ZSliceKind::ShmPtr) { - res |= map_zslice_to_shmbuf(zs, shmr)?; + map_zslice_to_shmbuf(zs, shmr)?; } - Ok(res) + Ok(()) } #[cold] #[inline(never)] -pub fn map_zslice_to_shmbuf( - zslice: &mut ZSlice, - shmr: &RwLock, -) -> ZResult { - // Deserialize the shmb info into shm buff +pub fn map_zslice_to_shmbuf(zslice: &mut ZSlice, shmr: &SharedMemoryReader) -> ZResult<()> { let codec = Zenoh080::new(); let mut reader = zslice.reader(); + // Deserialize the shminfo let shmbinfo: SharedMemoryBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - // First, try in read mode allowing concurrenct lookups - let r_guard = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { zasyncread!(shmr) }) - }); - let smb = r_guard.try_read_shmbuf(&shmbinfo).or_else(|_| { - drop(r_guard); - let mut 
w_guard = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { zasyncwrite!(shmr) }) - }); - w_guard.read_shmbuf(&shmbinfo) - })?; + // Mount shmbuf + let smb = shmr.read_shmbuf(&shmbinfo)?; // Replace the content of the slice let zs: ZSlice = smb.into(); *zslice = zs; - Ok(true) + Ok(()) } diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index d7c64c3034..48638834e0 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -12,7 +12,10 @@ // ZettaScale Zenoh Team, // #[cfg(feature = "shared-memory")] -use crate::unicast::shared_memory_unicast::Challenge; +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; + use crate::{ common::batch::BatchConfig, unicast::{ @@ -79,7 +82,7 @@ struct RecvInitSynOut { other_zid: ZenohId, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } // InitAck @@ -90,10 +93,12 @@ struct SendInitAckIn { other_zid: ZenohId, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } struct SendInitAckOut { cookie_nonce: u64, + #[cfg(feature = "shared-memory")] + ext_shm: Option, } // OpenSyn @@ -126,7 +131,8 @@ struct AcceptLink<'a> { #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm<'a>, + // Will be None if SHM operation is disabled by Config + ext_shm: Option>, #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, @@ -208,11 +214,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - let ext_shm = self - .ext_shm - .recv_init_syn((&mut state.transport.ext_shm, init_syn.ext_shm)) - .await - 
.map_err(|e| (e, Some(close::reason::GENERIC)))?; + let ext_shm = match &self.ext_shm { + Some(my_shm) => my_shm + .recv_init_syn(init_syn.ext_shm) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + _ => None, + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -267,14 +275,14 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_init_ack((&mut state.transport.ext_shm, input.ext_shm)) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(my_shm) => my_shm + .send_init_ack(&input.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + _ => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -357,6 +365,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { batch_size: state.transport.batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -371,7 +380,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; - let output = SendInitAckOut { cookie_nonce }; + let output = SendInitAckOut { + cookie_nonce, + #[cfg(feature = "shared-memory")] + ext_shm: input.ext_shm, + }; Ok(output) } @@ -464,10 +477,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - self.ext_shm - .recv_open_syn((&mut state.transport.ext_shm, open_syn.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + if let Some(my_shm) = self.ext_shm.as_ref() { + my_shm + .recv_open_syn((&mut state.transport.ext_shm, open_syn.ext_shm)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + } // Extension Auth #[cfg(feature = "transport_auth")] @@ -528,14 +543,14 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let 
ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_open_ack(&mut state.transport.ext_shm) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(my_shm) => my_shm + .send_open_ack(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -574,6 +589,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { lease: input.mine_lease, initial_sn: mine_initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -607,7 +623,12 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - cipher: &manager.cipher, ext_qos: ext::qos::QoSFsm::new(), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm::new(&manager.state.unicast.shm), + ext_shm: manager + .state + .unicast + .auth_shm + .as_ref() + .map(ext::shm::ShmFsm::new), #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), #[cfg(feature = "transport_auth")] @@ -644,7 +665,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - .multilink .accept(manager.config.unicast.max_links > 1), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), + ext_shm: ext::shm::StateAccept::new(), ext_lowlatency: ext::lowlatency::StateAccept::new( manager.config.unicast.is_lowlatency, ), @@ -708,7 +729,10 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - #[cfg(feature = "transport_multilink")] multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.transport.ext_shm.is_shm(), + shm: match state.transport.ext_shm.negotiated_to_use_shm() { + true => iack_out.ext_shm.map(TransportShmConfig::new), + false => None, + }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; diff --git 
a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 2aec0cf508..1287095a51 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -11,31 +11,108 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::{ - establishment::{AcceptFsm, OpenFsm}, - shared_memory_unicast::{Challenge, SharedMemoryUnicast}, -}; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; use async_trait::async_trait; -use std::convert::TryInto; +use rand::{Rng, SeedableRng}; +use std::ops::Deref; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_core::zasyncwrite; +use zenoh_core::bail; +use zenoh_crypto::PseudoRng; use zenoh_protocol::transport::{init, open}; -use zenoh_result::{zerror, Error as ZError}; -use zenoh_shm::SharedMemoryBufInfo; +use zenoh_result::{zerror, Error as ZError, ZResult}; +use zenoh_shm::{api::common::types::ProtocolID, posix_shm::array::ArrayInSHM}; + +/*************************************/ +/* Segment */ +/*************************************/ +const AUTH_SEGMENT_PREFIX: &str = "auth"; + +pub(crate) type AuthSegmentID = u32; +pub(crate) type AuthChallenge = u64; + +#[derive(Debug)] +pub struct AuthSegment { + array: ArrayInSHM, +} + +impl AuthSegment { + pub fn create(challenge: AuthChallenge, shm_protocols: &[ProtocolID]) -> ZResult { + let array = ArrayInSHM::::create( + 1 + shm_protocols.len(), + AUTH_SEGMENT_PREFIX, + )?; + unsafe { + (*array.elem_mut(0)) = challenge; + for elem in 1..array.elem_count() { + (*array.elem_mut(elem)) = shm_protocols[elem - 1] as u64; + } + }; + Ok(Self { array }) + } + + pub fn open(id: AuthSegmentID) -> ZResult { + let array = ArrayInSHM::open(id, AUTH_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn challenge(&self) -> AuthChallenge { + unsafe { 
*self.array.elem(0) } + } + + pub fn protocols(&self) -> Vec { + let mut result = vec![]; + for elem in 1..self.array.elem_count() { + result.push(unsafe { *self.array.elem(elem) as u32 }); + } + result + } + + pub fn id(&self) -> AuthSegmentID { + self.array.id() + } +} + +/*************************************/ +/* Authenticator */ +/*************************************/ +pub(crate) struct AuthUnicast { + segment: AuthSegment, +} + +impl Deref for AuthUnicast { + type Target = AuthSegment; + + fn deref(&self) -> &Self::Target { + &self.segment + } +} + +impl AuthUnicast { + pub fn new(shm_protocols: &[ProtocolID]) -> ZResult { + // Create a challenge for session establishment + let mut prng = PseudoRng::from_entropy(); + let nonce = prng.gen(); + + // allocate SHM segment with challenge + let segment = AuthSegment::create(nonce, shm_protocols)?; + + Ok(Self { segment }) + } +} /*************************************/ /* InitSyn */ /*************************************/ /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// ~ ShmMemBufInfo ~ +/// ~ Segment id ~ /// +---------------+ pub(crate) struct InitSyn { - pub(crate) alice_info: SharedMemoryBufInfo, + pub(crate) alice_segment: AuthSegmentID, } // Codec @@ -46,7 +123,7 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &InitSyn) -> Self::Output { - self.write(&mut *writer, &x.alice_info)?; + self.write(&mut *writer, &x.alice_segment)?; Ok(()) } } @@ -58,8 +135,8 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let alice_info: SharedMemoryBufInfo = self.read(&mut *reader)?; - Ok(InitSyn { alice_info }) + let alice_segment = self.read(&mut *reader)?; + Ok(InitSyn { alice_segment }) } } @@ -70,11 +147,11 @@ where /// +-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ -/// ~ ShmMemBufInfo ~ +/// ~ Segment id ~ /// +---------------+ struct InitAck { alice_challenge: u64, - bob_info: SharedMemoryBufInfo, + bob_segment: AuthSegmentID, } impl WCodec<&InitAck, 
&mut W> for Zenoh080 @@ -85,7 +162,7 @@ where fn write(self, writer: &mut W, x: &InitAck) -> Self::Output { self.write(&mut *writer, x.alice_challenge)?; - self.write(&mut *writer, &x.bob_info)?; + self.write(&mut *writer, &x.bob_segment)?; Ok(()) } } @@ -98,10 +175,10 @@ where fn read(self, reader: &mut R) -> Result { let alice_challenge: u64 = self.read(&mut *reader)?; - let bob_info: SharedMemoryBufInfo = self.read(&mut *reader)?; + let bob_segment = self.read(&mut *reader)?; Ok(InitAck { alice_challenge, - bob_info, + bob_segment, }) } } @@ -124,11 +201,11 @@ where // Extension Fsm pub(crate) struct ShmFsm<'a> { - inner: &'a SharedMemoryUnicast, + inner: &'a AuthUnicast, } impl<'a> ShmFsm<'a> { - pub(crate) const fn new(inner: &'a SharedMemoryUnicast) -> Self { + pub(crate) const fn new(inner: &'a AuthUnicast) -> Self { Self { inner } } } @@ -136,18 +213,29 @@ impl<'a> ShmFsm<'a> { /*************************************/ /* OPEN */ /*************************************/ -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct StateOpen { - is_shm: bool, + // false by default, will be switched to true at the end of open_ack + negotiated_to_use_shm: bool, } impl StateOpen { - pub(crate) const fn new(is_shm: bool) -> Self { - Self { is_shm } + pub(crate) const fn new() -> Self { + Self { + negotiated_to_use_shm: false, + } } - pub(crate) const fn is_shm(&self) -> bool { - self.is_shm + pub(crate) const fn negotiated_to_use_shm(&self) -> bool { + self.negotiated_to_use_shm + } + + #[cfg(test)] + pub(crate) fn rand() -> Self { + let mut rng = rand::thread_rng(); + Self { + negotiated_to_use_shm: rng.gen_bool(0.5), + } } } @@ -159,16 +247,12 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { type SendInitSynOut = Option; async fn send_init_syn( self, - state: Self::SendInitSynIn, + _state: Self::SendInitSynIn, ) -> Result { const S: &str = "Shm extension - Send InitSyn."; - if !state.is_shm() { - return Ok(None); - } - let 
init_syn = InitSyn { - alice_info: self.inner.challenge.info.clone(), + alice_segment: self.inner.id(), }; let codec = Zenoh080::new(); @@ -181,22 +265,16 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { Ok(Some(init::ext::Shm::new(buff.into()))) } - type RecvInitAckIn = (&'a mut StateOpen, Option); - type RecvInitAckOut = Challenge; + type RecvInitAckIn = Option; + type RecvInitAckOut = Option; async fn recv_init_ack( self, - input: Self::RecvInitAckIn, + mut input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Shm extension - Recv InitAck."; - let (state, mut ext) = input; - if !state.is_shm() { - return Ok(0); - } - - let Some(ext) = ext.take() else { - state.is_shm = false; - return Ok(0); + let Some(ext) = input.take() else { + return Ok(None); }; // Decode the extension @@ -204,18 +282,11 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { let mut reader = ext.value.reader(); let Ok(init_ack): Result = codec.read(&mut reader) else { tracing::trace!("{} Decoding error.", S); - state.is_shm = false; - return Ok(0); + return Ok(None); }; // Alice challenge as seen by Alice - let bytes: [u8; std::mem::size_of::()] = self - .inner - .challenge - .as_slice() - .try_into() - .map_err(|e| zerror!("{}", e))?; - let challenge = u64::from_le_bytes(bytes); + let challenge = self.inner.challenge(); // Verify that Bob has correctly read Alice challenge if challenge != init_ack.alice_challenge { @@ -225,35 +296,22 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { init_ack.alice_challenge, challenge ); - state.is_shm = false; - return Ok(0); + return Ok(None); } - // Read Bob's SharedMemoryBuf - let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_ack.bob_info) { + // Read Bob's SHM Segment + let bob_segment = match AuthSegment::open(init_ack.bob_segment) { Ok(buff) => buff, Err(e) => { tracing::trace!("{} {}", S, e); - state.is_shm = false; - return Ok(0); - } - }; - - // Bob challenge as seen by Alice - let bytes: [u8; std::mem::size_of::()] = match 
shm_buff.as_slice().try_into() { - Ok(bytes) => bytes, - Err(_) => { - tracing::trace!("{} Failed to read remote Shm.", S); - state.is_shm = false; - return Ok(0); + return Ok(None); } }; - let bob_challenge = u64::from_le_bytes(bytes); - Ok(bob_challenge) + Ok(Some(bob_segment)) } - type SendOpenSynIn = (&'a StateOpen, Self::RecvInitAckOut); + type SendOpenSynIn = &'a Self::RecvInitAckOut; type SendOpenSynOut = Option; async fn send_open_syn( self, @@ -261,12 +319,9 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { ) -> Result { // const S: &str = "Shm extension - Send OpenSyn."; - let (state, bob_challenge) = input; - if !state.is_shm() { - return Ok(None); - } - - Ok(Some(open::ext::Shm::new(bob_challenge))) + Ok(input + .as_ref() + .map(|val| open::ext::Shm::new(val.challenge()))) } type RecvOpenAckIn = (&'a mut StateOpen, Option); @@ -278,22 +333,17 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { const S: &str = "Shm extension - Recv OpenAck."; let (state, mut ext) = input; - if !state.is_shm() { - return Ok(()); - } let Some(ext) = ext.take() else { - state.is_shm = false; return Ok(()); }; if ext.value != 1 { tracing::trace!("{} Invalid value.", S); - state.is_shm = false; return Ok(()); } - state.is_shm = true; + state.negotiated_to_use_shm = true; Ok(()) } } @@ -302,27 +352,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { /* ACCEPT */ /*************************************/ -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) struct StateAccept { - is_shm: bool, -} - -impl StateAccept { - pub(crate) const fn new(is_shm: bool) -> Self { - Self { is_shm } - } - - pub(crate) const fn is_shm(&self) -> bool { - self.is_shm - } - - #[cfg(test)] - pub(crate) fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - Self::new(rng.gen_bool(0.5)) - } -} +pub(crate) type StateAccept = StateOpen; // Codec impl WCodec<&StateAccept, &mut W> for Zenoh080 @@ -332,8 +362,8 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &StateAccept) -> 
Self::Output { - let is_shm = u8::from(x.is_shm); - self.write(&mut *writer, is_shm)?; + let negotiated_to_use_shm = u8::from(x.negotiated_to_use_shm); + self.write(&mut *writer, negotiated_to_use_shm)?; Ok(()) } } @@ -345,9 +375,11 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let is_shm: u8 = self.read(&mut *reader)?; - let is_shm = is_shm == 1; - Ok(StateAccept { is_shm }) + let negotiated_to_use_shm: u8 = self.read(&mut *reader)?; + let negotiated_to_use_shm: bool = negotiated_to_use_shm == 1; + Ok(StateAccept { + negotiated_to_use_shm, + }) } } @@ -355,22 +387,16 @@ where impl<'a> AcceptFsm for &'a ShmFsm<'a> { type Error = ZError; - type RecvInitSynIn = (&'a mut StateAccept, Option); - type RecvInitSynOut = Challenge; + type RecvInitSynIn = Option; + type RecvInitSynOut = Option; async fn recv_init_syn( self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Shm extension - Recv InitSyn."; - let (state, mut ext) = input; - if !state.is_shm() { - return Ok(0); - } - - let Some(ext) = ext.take() else { - state.is_shm = false; - return Ok(0); + let Some(ext) = input.as_ref() else { + return Ok(None); }; // Decode the extension @@ -378,35 +404,16 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { let mut reader = ext.value.reader(); let Ok(init_syn): Result = codec.read(&mut reader) else { tracing::trace!("{} Decoding error.", S); - state.is_shm = false; - return Ok(0); + bail!(""); }; - // Read Alice's SharedMemoryBuf - let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_syn.alice_info) { - Ok(buff) => buff, - Err(e) => { - tracing::trace!("{} {}", S, e); - state.is_shm = false; - return Ok(0); - } - }; + // Read Alice's SHM Segment + let alice_segment = AuthSegment::open(init_syn.alice_segment)?; - // Alice challenge as seen by Bob - let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { - Ok(bytes) => bytes, - Err(_) => { - tracing::trace!("{} Failed to read remote Shm.", S); - 
state.is_shm = false; - return Ok(0); - } - }; - let alice_challenge = u64::from_le_bytes(bytes); - - Ok(alice_challenge) + Ok(Some(alice_segment)) } - type SendInitAckIn = (&'a StateAccept, Self::RecvInitSynOut); + type SendInitAckIn = &'a Self::RecvInitSynOut; type SendInitAckOut = Option; async fn send_init_ack( self, @@ -414,14 +421,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { ) -> Result { const S: &str = "Shm extension - Send InitAck."; - let (state, alice_challenge) = input; - if !state.is_shm() { + let Some(alice_segment) = input.as_ref() else { return Ok(None); - } + }; let init_syn = InitAck { - alice_challenge, - bob_info: self.inner.challenge.info.clone(), + alice_challenge: alice_segment.challenge(), + bob_segment: self.inner.id(), }; let codec = Zenoh080::new(); @@ -443,23 +449,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { const S: &str = "Shm extension - Recv OpenSyn."; let (state, mut ext) = input; - if !state.is_shm() { - return Ok(()); - } let Some(ext) = ext.take() else { - state.is_shm = false; return Ok(()); }; // Bob challenge as seen by Bob - let bytes: [u8; std::mem::size_of::()] = self - .inner - .challenge - .as_slice() - .try_into() - .map_err(|e| zerror!("{}", e))?; - let challenge = u64::from_le_bytes(bytes); + let challenge = self.inner.challenge(); // Verify that Alice has correctly read Bob challenge let bob_challnge = ext.value; @@ -470,26 +466,25 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { bob_challnge, challenge ); - state.is_shm = false; return Ok(()); } + state.negotiated_to_use_shm = true; + Ok(()) } - type SendOpenAckIn = &'a mut StateAccept; + type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( self, - state: Self::SendOpenAckIn, + input: Self::SendOpenAckIn, ) -> Result { // const S: &str = "Shm extension - Send OpenAck."; - if !state.is_shm() { - return Ok(None); - } - - state.is_shm = true; - Ok(Some(open::ext::Shm::new(1))) + Ok(match input.negotiated_to_use_shm { + true => 
Some(open::ext::Shm::new(1)), + false => None, + }) } } diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index bb5db2336e..40aa959d10 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -12,7 +12,9 @@ // ZettaScale Zenoh Team, // #[cfg(feature = "shared-memory")] -use crate::unicast::shared_memory_unicast::Challenge; +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; use crate::{ common::batch::BatchConfig, unicast::{ @@ -81,7 +83,7 @@ struct RecvInitAckOut { other_whatami: WhatAmI, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } // OpenSyn @@ -91,11 +93,13 @@ struct SendOpenSynIn { other_zid: ZenohId, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } struct SendOpenSynOut { mine_initial_sn: TransportSn, + #[cfg(feature = "shared-memory")] + ext_shm: Option, } // OpenAck @@ -110,7 +114,7 @@ struct OpenLink<'a> { #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm<'a>, + ext_shm: Option>, #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, @@ -138,14 +142,14 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(ext) => ext .send_init_syn(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -191,6 +195,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { batch_size: 
state.transport.batch_size, resolution: state.transport.resolution, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -295,11 +300,13 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - let shm_challenge = self - .ext_shm - .recv_init_ack((&mut state.transport.ext_shm, init_ack.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + let shm_segment = match self.ext_shm.as_ref() { + Some(ext) => ext + .recv_init_ack(init_ack.ext_shm) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None => None, + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -333,7 +340,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { other_whatami: init_ack.whatami, other_cookie: init_ack.cookie, #[cfg(feature = "shared-memory")] - ext_shm: shm_challenge, + ext_shm: shm_segment, }; Ok(output) } @@ -354,14 +361,14 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_open_syn((&state.transport.ext_shm, input.ext_shm)) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(ext_shm) => ext_shm + .send_open_syn(&input.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -408,6 +415,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { initial_sn: mine_initial_sn, cookie: input.other_cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -421,7 +429,11 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; - let output = SendOpenSynOut { mine_initial_sn }; + let output = SendOpenSynOut { + mine_initial_sn, + #[cfg(feature = "shared-memory")] + ext_shm: input.ext_shm, + }; Ok(output) } @@ -471,10 +483,11 @@ impl<'a, 'b: 
'a> OpenFsm for &'a mut OpenLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - self.ext_shm - .recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + if let Some(ext) = self.ext_shm.as_ref() { + ext.recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))? + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -531,7 +544,12 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm::new(&manager.state.unicast.shm), + ext_shm: manager + .state + .unicast + .auth_shm + .as_ref() + .map(ext::shm::ShmFsm::new), #[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), @@ -555,7 +573,7 @@ pub(crate) async fn open_link( .multilink .open(manager.config.unicast.max_links > 1), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), + ext_shm: ext::shm::StateOpen::new(), ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), }, @@ -619,7 +637,10 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_multilink")] multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.transport.ext_shm.is_shm(), + shm: match state.transport.ext_shm.negotiated_to_use_shm() { + true => osyn_out.ext_shm.map(TransportShmConfig::new), + false => None, + }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index e03201e0c5..1c9c190aae 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -207,7 +207,7 @@ impl TransportLinkUnicastRx 
{ pub async fn recv_batch(&mut self, buff: C) -> ZResult where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { const ERR: &str = "Read error from link: "; @@ -220,14 +220,14 @@ impl TransportLinkUnicastRx { // Read the bytes let slice = into - .as_mut_slice() + .as_mut() .get_mut(len.len()..len.len() + l) .ok_or_else(|| zerror!("{ERR}{self}. Invalid batch length or buffer size."))?; self.link.read_exact(slice).await?; len.len() + l } else { // Read the bytes - self.link.read(into.as_mut_slice()).await? + self.link.read(into.as_mut()).await? }; // tracing::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index 4be94cc1a0..de0b62354f 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -35,8 +35,8 @@ impl TransportUnicastLowlatency { if let Some(callback) = callback.as_ref() { #[cfg(feature = "shared-memory")] { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader)?; + if self.config.shm.is_some() { + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; } } callback.handle_message(msg) diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index dcc9fc8476..726d21bb84 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -193,7 +193,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool { - self.config.is_shm + self.config.shm.is_some() } fn is_qos(&self) -> bool { diff --git a/io/zenoh-transport/src/unicast/lowlatency/tx.rs b/io/zenoh-transport/src/unicast/lowlatency/tx.rs index 38751eb61d..d573544340 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/tx.rs +++ 
b/io/zenoh-transport/src/unicast/lowlatency/tx.rs @@ -20,6 +20,9 @@ use zenoh_protocol::{ use zenoh_result::bail; use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + impl TransportUnicastLowlatency { #[allow(unused_mut)] // When feature "shared-memory" is not enabled #[allow(clippy::let_and_return)] // When feature "stats" is not enabled @@ -27,12 +30,7 @@ impl TransportUnicastLowlatency { pub(crate) fn internal_schedule(&self, mut msg: NetworkMessage) -> ZResult<()> { #[cfg(feature = "shared-memory")] { - let res = if self.config.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.config.shm) { bail!("Failed SHM conversion: {}", e); } } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 93e9d4da80..6844f30163 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // #[cfg(feature = "shared-memory")] -use super::shared_memory_unicast::SharedMemoryUnicast; +use super::establishment::ext::shm::AuthUnicast; use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; #[cfg(feature = "transport_auth")] use crate::unicast::establishment::ext::auth::Auth; @@ -49,6 +49,8 @@ use zenoh_protocol::{ transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::SharedMemoryReader; /*************************************/ /* TRANSPORT CONFIG */ @@ -82,9 +84,10 @@ pub struct TransportManagerStateUnicast { // Active authenticators #[cfg(feature = "transport_auth")] pub(super) authenticator: Arc, - // Shared memory + // SHM probing + // Option will be None if SHM is disabled by Config #[cfg(feature = "shared-memory")] - 
pub(super) shm: Arc, + pub(super) auth_shm: Option, } pub struct TransportManagerParamsUnicast { @@ -211,6 +214,7 @@ impl TransportManagerBuilderUnicast { pub fn build( self, #[allow(unused)] prng: &mut PseudoRng, // Required for #[cfg(feature = "transport_multilink")] + #[cfg(feature = "shared-memory")] shm_reader: &SharedMemoryReader, ) -> ZResult { if self.is_qos && self.is_lowlatency { bail!("'qos' and 'lowlatency' options are incompatible"); @@ -238,10 +242,15 @@ impl TransportManagerBuilderUnicast { transports: Arc::new(AsyncMutex::new(HashMap::new())), #[cfg(feature = "transport_multilink")] multilink: Arc::new(MultiLink::make(prng)?), - #[cfg(feature = "shared-memory")] - shm: Arc::new(SharedMemoryUnicast::make()?), #[cfg(feature = "transport_auth")] authenticator: Arc::new(self.authenticator), + #[cfg(feature = "shared-memory")] + auth_shm: match self.is_shm { + true => Some(AuthUnicast::new( + shm_reader.supported_protocols().as_slice(), + )?), + false => None, + }, }; let params = TransportManagerParamsUnicast { config, state }; @@ -288,11 +297,6 @@ impl TransportManager { TransportManagerBuilderUnicast::default() } - #[cfg(feature = "shared-memory")] - pub(crate) fn shm(&self) -> &Arc { - &self.state.unicast.shm - } - pub async fn close_unicast(&self) { tracing::trace!("TransportManagerUnicast::clear())"); @@ -590,14 +594,14 @@ impl TransportManager { "shared-memory", { tracing::debug!( - "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {}, multilink: {}, lowlatency: {}", + "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {:?}, multilink: {}, lowlatency: {}", self.config.zid, config.zid, config.whatami, config.sn_resolution, config.tx_initial_sn, config.is_qos, - config.is_shm, + config.shm, is_multilink, config.is_lowlatency ); diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 
55226f287c..630b56aa1b 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -22,7 +22,7 @@ pub(crate) mod universal; pub mod test_helpers; #[cfg(feature = "shared-memory")] -pub(crate) mod shared_memory_unicast; +use crate::shm::TransportShmConfig; use self::transport_unicast_inner::TransportUnicastTrait; @@ -54,7 +54,7 @@ pub(crate) struct TransportConfigUnicast { #[cfg(feature = "transport_multilink")] pub(crate) multilink: Option, #[cfg(feature = "shared-memory")] - pub(crate) is_shm: bool, + pub(crate) shm: Option, pub(crate) is_lowlatency: bool, } diff --git a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs b/io/zenoh-transport/src/unicast/shared_memory_unicast.rs deleted file mode 100644 index 881e6886d2..0000000000 --- a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs +++ /dev/null @@ -1,57 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use rand::{Rng, SeedableRng}; -use tokio::sync::RwLock; -use zenoh_core::zerror; -use zenoh_crypto::PseudoRng; -use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager, SharedMemoryReader}; - -pub(crate) type Challenge = u64; -const NAME: &str = "zshm"; - -/*************************************/ -/* Authenticator */ -/*************************************/ -pub(crate) struct SharedMemoryUnicast { - // Rust guarantees that fields are dropped in the order of declaration. - // Buffer needs to be dropped before the manager. 
- pub(crate) challenge: SharedMemoryBuf, - pub(crate) _manager: SharedMemoryManager, - pub(crate) reader: RwLock, -} - -unsafe impl Sync for SharedMemoryUnicast {} - -impl SharedMemoryUnicast { - pub fn make() -> ZResult { - // Create a challenge for session establishment - let mut prng = PseudoRng::from_entropy(); - let nonce = prng.gen::(); - let size = std::mem::size_of::(); - - let mut _manager = SharedMemoryManager::make(format!("{NAME}.{nonce}"), size)?; - - let mut challenge = _manager.alloc(size).map_err(|e| zerror!("{e}"))?; - let slice = unsafe { challenge.as_mut_slice() }; - slice[0..size].copy_from_slice(&nonce.to_le_bytes()); - - let shmauth = SharedMemoryUnicast { - challenge, - _manager, - reader: RwLock::new(SharedMemoryReader::new()), - }; - Ok(shmauth) - } -} diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 9a85ee9a46..8d5d703be1 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -236,7 +236,7 @@ async fn rx_task( where T: ZSliceBuffer + 'static, F: Fn() -> T, - RecyclingObject: ZSliceBuffer, + RecyclingObject: AsMut<[u8]> + ZSliceBuffer, { let batch = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 83b2884a59..3edf57f507 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -42,8 +42,8 @@ impl TransportUnicastUniversal { ) -> ZResult<()> { #[cfg(feature = "shared-memory")] { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.unicast.shm.reader)?; + if self.config.shm.is_some() { + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; } } callback.handle_message(msg) diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs 
b/io/zenoh-transport/src/unicast/universal/transport.rs index 58acd5c4b2..5f581673e9 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -333,7 +333,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool { - self.config.is_shm + self.config.shm.is_some() } fn is_qos(&self) -> bool { diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index ffc162c0b4..a381bb4d29 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -15,6 +15,9 @@ use super::transport::TransportUnicastUniversal; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + impl TransportUnicastUniversal { fn schedule_on_link(&self, msg: NetworkMessage) -> bool { macro_rules! zpush { @@ -61,12 +64,7 @@ impl TransportUnicastUniversal { pub(crate) fn internal_schedule(&self, mut msg: NetworkMessage) -> bool { #[cfg(feature = "shared-memory")] { - let res = if self.config.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.config.shm) { tracing::trace!("Failed SHM conversion: {}", e); return false; } diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index b07666af9c..d5eb62c961 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -168,13 +168,19 @@ mod tests { // Open transport -> This should be accepted println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - 
assert!(!peer01_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer01_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer02_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer02_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); // Wait to for peer 01 and 02 to join each other ztimeout!(async { @@ -186,10 +192,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer01_transport = peer01_manager - .get_transport_multicast(&peer02_id) - .await - .unwrap(); + let peer01_transport = + ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); println!( "\tPeer01 peers: {:?}", peer01_transport.get_peers().unwrap() @@ -204,10 +208,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer02_transport = peer02_manager - .get_transport_multicast(&peer01_id) - .await - .unwrap(); + let peer02_transport = + ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); println!( "\tPeer02 peers: {:?}", peer02_transport.get_peers().unwrap() @@ -235,7 +237,7 @@ mod tests { // Close the peer01 transport println!("Closing transport with {endpoint}"); ztimeout!(peer01.transport.close()).unwrap(); - assert!(peer01.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { tokio::time::sleep(SLEEP_COUNT).await; @@ -245,7 +247,7 @@ mod tests { // Close the peer02 transport 
println!("Closing transport with {endpoint}"); ztimeout!(peer02.transport.close()).unwrap(); - assert!(peer02.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); // Wait a little bit tokio::time::sleep(SLEEP).await; diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 472af837ea..20ceb49218 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -164,13 +164,19 @@ mod tests { // Open transport -> This should be accepted println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer01_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer01_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer02_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer02_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); // Wait to for peer 01 and 02 to join each other ztimeout!(async { @@ -182,10 +188,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer01_transport = peer01_manager - .get_transport_multicast(&peer02_id) - .await - .unwrap(); + let peer01_transport = + ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); println!( "\tPeer01 peers: {:?}", peer01_transport.get_peers().unwrap() @@ -200,10 +204,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); 
- let peer02_transport = peer02_manager - .get_transport_multicast(&peer01_id) - .await - .unwrap(); + let peer02_transport = + ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); println!( "\tPeer02 peers: {:?}", peer02_transport.get_peers().unwrap() @@ -231,7 +233,7 @@ mod tests { // Close the peer01 transport println!("Closing transport with {endpoint}"); ztimeout!(peer01.transport.close()).unwrap(); - assert!(peer01.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { tokio::time::sleep(SLEEP_COUNT).await; @@ -241,7 +243,7 @@ mod tests { // Close the peer02 transport println!("Closing transport with {endpoint}"); ztimeout!(peer02.transport.close()).unwrap(); - assert!(peer02.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); // Wait a little bit tokio::time::sleep(SLEEP).await; diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index 1cad7c6a63..abcf011eed 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -291,10 +291,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { ]; let router_pri_key = RsaPrivateKey::from_components(n, e, d, primes).unwrap(); let mut auth_pubkey = AuthPubKey::new(router_pub_key.into(), router_pri_key.into()); - auth_pubkey - .add_pubkey(client01_pub_key.into()) - .await - .unwrap(); + ztimeout!(auth_pubkey.add_pubkey(client01_pub_key.into())).unwrap(); let mut auth = Auth::empty(); auth.set_pubkey(Some(auth_pubkey)); let unicast = make_basic_transport_manager_builder( @@ -315,7 +312,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add the locator on the router 
ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); println!("Transport Authenticator PubKey [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Authenticator PubKey [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -344,10 +341,10 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add client02 pubkey to the router let router_auth_handle = router_manager.get_auth_handle_unicast(); - zasyncwrite!(router_auth_handle.get_pubkey().unwrap()) - .add_pubkey(client02_pub_key.into()) - .await - .unwrap(); + ztimeout!( + zasyncwrite!(router_auth_handle.get_pubkey().unwrap()).add_pubkey(client02_pub_key.into()) + ) + .unwrap(); /* [3b] */ // Open a first transport from client02 to the router @@ -435,13 +432,9 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { let router_handler = Arc::new(SHRouterAuthenticator::new()); // Create the router transport manager let mut auth_usrpwd_router = AuthUsrPwd::new(None); - auth_usrpwd_router - .add_user(user01.clone().into(), password01.clone().into()) - .await + ztimeout!(auth_usrpwd_router.add_user(user01.clone().into(), password01.clone().into())) .unwrap(); - auth_usrpwd_router - .add_user(user03.clone().into(), password03.clone().into()) - .await + ztimeout!(auth_usrpwd_router.add_user(user03.clone().into(), password03.clone().into())) .unwrap(); let mut auth_router = Auth::empty(); auth_router.set_usrpwd(Some(auth_usrpwd_router)); @@ -520,7 +513,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { println!("Transport Authenticator UserPassword [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Authenticator UserPassword [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Authenticator UserPassword [1a2]: {locators:?}"); assert_eq!(locators.len(), 
1); diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index d052ed9313..6f80e7dd58 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -215,10 +215,7 @@ mod tests { let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -357,13 +354,12 @@ mod tests { { let c_stats = client_transport.get_stats().unwrap().report(); println!("\tClient: {:?}", c_stats); - let r_stats = router_manager - .get_transport_unicast(&client_manager.config.zid) - .await - .unwrap() - .get_stats() - .map(|s| s.report()) - .unwrap(); + let r_stats = + ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) + .unwrap() + .get_stats() + .map(|s| s.report()) + .unwrap(); println!("\tRouter: {:?}", r_stats); } diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 54b469d6ec..dc4c0fbd3d 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -145,7 +145,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners().await; + let locs = ztimeout!(peer01_manager.get_listeners()); println!("[Transport Peer 01b] => Getting endpoints: {c_end01:?} {locs:?}"); assert_eq!(c_end01.len(), locs.len()); @@ -173,11 +173,8 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Waiting... 
OK"); // Verify that the transport has been correctly open - assert_eq!(peer01_manager.get_transports_unicast().await.len(), 1); - let s02 = peer01_manager - .get_transport_unicast(&c_zid02) - .await - .unwrap(); + assert_eq!(ztimeout!(peer01_manager.get_transports_unicast()).len(), 1); + let s02 = ztimeout!(peer01_manager.get_transport_unicast(&c_zid02)).unwrap(); assert_eq!( s02.get_links().unwrap().len(), c_end01.len() + c_end02.len() @@ -246,7 +243,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners().await; + let locs = ztimeout!(peer02_manager.get_listeners()); println!("[Transport Peer 02b] => Getting endpoints: {c_end02:?} {locs:?}"); assert_eq!(c_end02.len(), locs.len()); @@ -276,13 +273,10 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Transports: {:?}", - peer02_manager.get_transports_unicast().await + ztimeout!(peer02_manager.get_transports_unicast()) ); - assert_eq!(peer02_manager.get_transports_unicast().await.len(), 1); - let s01 = peer02_manager - .get_transport_unicast(&c_zid01) - .await - .unwrap(); + assert_eq!(ztimeout!(peer02_manager.get_transports_unicast()).len(), 1); + let s01 = ztimeout!(peer02_manager.get_transport_unicast(&c_zid01)).unwrap(); assert_eq!( s01.get_links().unwrap().len(), c_end01.len() + c_end02.len() diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 0e88f40cde..40a513b874 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -64,10 +64,7 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { println!("Opening transport with {endpoint}"); let _ = ztimeout!(client_manager.open_transport_unicast(endpoint.clone())).unwrap(); - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let 
client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Create the message to send let message: NetworkMessage = Push { diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 5588612c65..14670bf532 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -220,7 +220,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Add a listener to the router println!("\nTransport Intermittent [1a1]"); let _ = ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Intermittent [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -228,7 +228,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Open a transport from client01 to the router let c_ses1 = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())).unwrap(); assert_eq!(c_ses1.get_links().unwrap().len(), 1); - assert_eq!(client01_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(client01_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses1.get_zid().unwrap(), router_id); /* [3] */ @@ -244,7 +247,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_ses2 = ztimeout!(c_client02_manager.open_transport_unicast(c_endpoint.clone())).unwrap(); assert_eq!(c_ses2.get_links().unwrap().len(), 1); - assert_eq!(c_client02_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(c_client02_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses2.get_zid().unwrap(), c_router_id); tokio::time::sleep(SLEEP).await; @@ -269,7 +275,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_ses3 = 
ztimeout!(c_client03_manager.open_transport_unicast(c_endpoint.clone())).unwrap(); assert_eq!(c_ses3.get_links().unwrap().len(), 1); - assert_eq!(c_client03_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(c_client03_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses3.get_zid().unwrap(), c_router_id); tokio::time::sleep(SLEEP).await; @@ -361,15 +370,15 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) /* [5] */ // Close the open transport on the client println!("Transport Intermittent [5a1]"); - for s in client01_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client01_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } println!("Transport Intermittent [5a2]"); - for s in client02_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client02_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } println!("Transport Intermittent [5a3]"); - for s in client03_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client03_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 7952d77b10..c06485fd06 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -134,7 +134,7 @@ mod tests { println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -148,7 +148,7 @@ mod tests { assert!(res.is_ok()); let c_ses1 = res.unwrap(); println!("Transport Open Close [1d1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = 
ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [1d2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses1.get_zid().unwrap(), router_id); @@ -188,7 +188,7 @@ mod tests { assert!(res.is_ok()); let c_ses2 = res.unwrap(); println!("Transport Open Close [2b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [2b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses2.get_zid().unwrap(), router_id); @@ -224,7 +224,7 @@ mod tests { println!("Transport Open Close [3a2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [3b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [3b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses1.get_zid().unwrap(), router_id); @@ -254,7 +254,7 @@ mod tests { println!("Transport Open Close [4a2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [4b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [4b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -284,7 +284,7 @@ mod tests { assert!(res.is_ok()); let c_ses3 = res.unwrap(); println!("Transport Open Close [5b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [5b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses3.get_zid().unwrap(), router_id); @@ -316,7 +316,7 @@ mod tests { assert!(res.is_ok()); let c_ses4 = res.unwrap(); println!("Transport Open Close [6b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = 
ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [6b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses4.get_zid().unwrap(), router_id); @@ -332,7 +332,7 @@ mod tests { println!("Transport Open Close [6d2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [6e1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [6e2]: {transports:?}"); assert_eq!(transports.len(), 1); @@ -340,7 +340,7 @@ mod tests { println!("Transport Open Close [6f1]"); ztimeout!(async { tokio::time::sleep(SLEEP).await; - let transports = router_manager.get_transports_unicast().await; + let transports = ztimeout!(router_manager.get_transports_unicast()); assert_eq!(transports.len(), 2); let s = transports .iter() @@ -358,7 +358,7 @@ mod tests { println!("Transport Open Close [7a2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [7b1]"); - let transports = client03_manager.get_transports_unicast().await; + let transports = ztimeout!(client03_manager.get_transports_unicast()); println!("Transport Open Close [7b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -373,7 +373,7 @@ mod tests { println!("Transport Open Close [8b2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [8c1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [8c2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -400,7 +400,7 @@ mod tests { assert!(res.is_ok()); let c_ses4 = res.unwrap(); println!("Transport Open Close [9b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [9b2]: {transports:?}"); assert_eq!(transports.len(), 1); 
println!("Transport Open Close [9c1]"); @@ -434,7 +434,7 @@ mod tests { println!("Transport Open Close [9a2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [9b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [9b2]: {transports:?}"); assert_eq!(transports.len(), 0); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index ec897b9382..799290aced 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -151,7 +151,7 @@ async fn openclose_transport( println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index dd023b9749..fa7f68a8a9 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -227,10 +227,7 @@ async fn open_transport_unicast( let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 44ea43179b..637f9f8a86 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -13,7 +13,6 @@ // #[cfg(feature = "shared-memory")] mod tests { - use rand::{Rng, SeedableRng}; use std::{ any::Any, convert::TryFrom, @@ -25,7 
+24,6 @@ mod tests { }; use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::ztimeout; - use zenoh_crypto::PseudoRng; use zenoh_link::Link; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, @@ -35,8 +33,19 @@ mod tests { }, zenoh::{PushBody, Put}, }; - use zenoh_result::{zerror, ZResult}; - use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager}; + use zenoh_result::ZResult; + use zenoh_shm::{ + api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::{ + BlockOn, GarbageCollect, SharedMemoryProviderBuilder, + }, + }, + SharedMemoryBuf, + }; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, @@ -44,7 +53,6 @@ mod tests { const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); - const USLEEP: Duration = Duration::from_micros(100); const MSG_COUNT: usize = 1_000; const MSG_SIZE: usize = 1_024; @@ -152,22 +160,16 @@ mod tests { let peer_shm02 = ZenohId::try_from([2]).unwrap(); let peer_net01 = ZenohId::try_from([3]).unwrap(); - let mut tries = 100; - let mut prng = PseudoRng::from_entropy(); - let mut shm01 = loop { - // Create the SharedMemoryManager - if let Ok(shm01) = SharedMemoryManager::make( - format!("peer_shm01_{}_{}", endpoint.protocol(), prng.gen::()), - 2 * MSG_SIZE, - ) { - break Ok(shm01); - } - tries -= 1; - if tries == 0 { - break Err(zerror!("Unable to create SharedMemoryManager!")); - } - } - .unwrap(); + // create SHM provider + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(2 * MSG_SIZE) + .unwrap() + .res() + .unwrap(); + let shm01 = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); // Create a peer manager with 
shared-memory authenticator enabled let peer_shm01_handler = Arc::new(SHPeer::new(true)); @@ -229,35 +231,28 @@ mod tests { // Retrieve the transports println!("Transport SHM [2a]"); - let peer_shm02_transport = peer_shm01_manager - .get_transport_unicast(&peer_shm02) - .await - .unwrap(); + let peer_shm02_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_shm02)).unwrap(); assert!(peer_shm02_transport.is_shm().unwrap()); println!("Transport SHM [2b]"); - let peer_net01_transport = peer_shm01_manager - .get_transport_unicast(&peer_net01) - .await - .unwrap(); + let peer_net01_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); + let layout = shm01.alloc_layout().size(MSG_SIZE).res().unwrap(); + // Send the message println!("Transport SHM [3a]"); // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(async { - loop { - match shm01.alloc(MSG_SIZE) { - Ok(sbuf) => break sbuf, - Err(_) => tokio::time::sleep(USLEEP).await, - } - } - }); - - let bs = unsafe { sbuf.as_mut_slice() }; - bs[0..8].copy_from_slice(&msg_count.to_le_bytes()); + let mut sbuf = ztimeout!(layout + .alloc() + .with_policy::>() + .res_async()) + .unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { wire_expr: "test".into(), @@ -296,16 +291,12 @@ mod tests { // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(async { - loop { - match shm01.alloc(MSG_SIZE) { - Ok(sbuf) => break sbuf, - Err(_) => tokio::time::sleep(USLEEP).await, - } - } - }); - let bs = unsafe { sbuf.as_mut_slice() }; - bs[0..8].copy_from_slice(&msg_count.to_le_bytes()); + let mut sbuf = ztimeout!(layout + .alloc() + .with_policy::>() + .res_async()) + .unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: 
NetworkMessage = Push { wire_expr: "test".into(), diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index d7856fde94..92267458f0 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -160,7 +160,7 @@ mod tests { println!("[Simultaneous 01a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners().await; + let locs = ztimeout!(peer01_manager.get_listeners()); println!("[Simultaneous 01b] => Getting endpoints: {endpoint01:?} {locs:?}"); assert_eq!(endpoint01.len(), locs.len()); @@ -170,7 +170,7 @@ mod tests { println!("[Simultaneous 02a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners().await; + let locs = ztimeout!(peer02_manager.get_listeners()); println!("[Simultaneous 02b] => Getting endpoints: {endpoint02:?} {locs:?}"); assert_eq!(endpoint02.len(), locs.len()); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index a23fa48e96..a4a1e90edb 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -386,10 +386,7 @@ async fn open_transport_unicast( let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -529,9 +526,7 @@ async fn run_single( { let c_stats = client_transport.get_stats().unwrap().report(); println!("\tClient: {:?}", c_stats); - let r_stats = router_manager - .get_transport_unicast(&client_manager.config.zid) - .await + let r_stats = ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) .unwrap() .get_stats() .map(|s| s.report()) diff --git 
a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index c42b3d1b69..ec25e26ab9 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -29,6 +29,9 @@ maintenance = { status = "actively-developed" } [features] unstable = [] default = [] +shared-memory = [ + "zenoh/shared-memory", +] [dependencies] tokio = { workspace = true, features = [ diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index b954ed639c..440065331a 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -35,6 +35,7 @@ shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory", "zenoh-transport/shared-memory", + "zenoh-buffers/shared-memory", ] stats = ["zenoh-transport/stats", "zenoh-protocol/stats"] transport_multilink = ["zenoh-transport/transport_multilink"] diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 6f8ba23a65..036271b765 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -15,8 +15,14 @@ //! ZBytes primitives. use crate::buffers::ZBuf; use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, - string::FromUtf8Error, sync::Arc, + borrow::Cow, + convert::Infallible, + fmt::Debug, + marker::PhantomData, + ops::{Add, AddAssign, Deref}, + str::Utf8Error, + string::FromUtf8Error, + sync::Arc, }; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ @@ -28,8 +34,70 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh_shm::{ + api::slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + }, + SharedMemoryBuf, +}; + +pub enum Cipher { + Aes512(ZSlice), +} + +pub enum Compression { + LZ4, +} + +#[derive(Default)] +pub struct Transformation { + cipher: Option, + compression: Option, +} + +impl From for Transformation { + fn 
from(value: Cipher) -> Self { + Transformation { + cipher: Some(value), + ..Default::default() + } + } +} + +impl From for Transformation { + fn from(value: Compression) -> Self { + Transformation { + compression: Some(value), + ..Default::default() + } + } +} + +impl Add for Transformation { + type Output = Transformation; + + fn add(mut self, rhs: Self) -> Self::Output { + self += rhs; + self + } +} + +impl AddAssign for Transformation { + fn add_assign(&mut self, rhs: Transformation) { + fn combine(mut lhs: Option, mut rhs: Option) -> Option { + match (lhs.take(), rhs.take()) { + (Some(_), Some(r)) => Some(r), + (None, r) => r, + (l, None) => l, + } + } + + self.cipher = combine(self.cipher.take(), rhs.cipher); + self.compression = combine(self.compression.take(), rhs.compression); + } +} /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { @@ -40,10 +108,11 @@ pub trait Serialize { } pub trait Deserialize<'a, T> { + type Input: 'a; type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a ZBytes) -> Result; + fn deserialize(self, t: Self::Input) -> Result; } /// ZBytes contains the serialized bytes of user data. @@ -128,7 +197,18 @@ impl ZBytes { /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize<'a, T>(&'a self) -> ZResult where - ZSerde: Deserialize<'a, T>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, + >::Error: Debug, + { + ZSerde + .deserialize(self) + .map_err(|e| zerror!("{:?}", e).into()) + } + + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, >::Error: Debug, { ZSerde @@ -139,7 +219,16 @@ impl ZBytes { /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
pub fn into<'a, T>(&'a self) -> T where - ZSerde: Deserialize<'a, T, Error = Infallible>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, + >::Error: Debug, + { + ZSerde.deserialize(self).unwrap_infallible() + } + + /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn into_mut<'a, T>(&'a mut self) -> T + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, >::Error: Debug, { ZSerde.deserialize(self).unwrap_infallible() @@ -192,7 +281,7 @@ where impl Iterator for ZBytesIterator<'_, T> where - for<'a> ZSerde: Deserialize<'a, T>, + for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, for<'a> >::Error: Debug, { type Item = T; @@ -311,10 +400,25 @@ impl From<&ZBuf> for ZBytes { } } -impl Deserialize<'_, ZBuf> for ZSerde { +impl Serialize<&mut ZBuf> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZBuf) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZBuf> for ZBytes { + fn from(t: &mut ZBuf) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZBuf> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.clone()) } } @@ -331,6 +435,12 @@ impl From<&ZBytes> for ZBuf { } } +impl From<&mut ZBytes> for ZBuf { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // ZSlice impl Serialize for ZSerde { type Output = ZBytes; @@ -360,10 +470,25 @@ impl From<&ZSlice> for ZBytes { } } -impl Deserialize<'_, ZSlice> for ZSerde { +impl Serialize<&mut ZSlice> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZSlice) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZSlice> for ZBytes { + fn from(t: &mut ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZSlice> for ZSerde { + type Input = &'a ZBytes; type Error = 
Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.to_zslice()) } } @@ -380,6 +505,12 @@ impl From<&ZBytes> for ZSlice { } } +impl From<&mut ZBytes> for ZSlice { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // [u8; N] impl Serialize<[u8; N]> for ZSerde { type Output = ZBytes; @@ -409,10 +540,25 @@ impl From<&[u8; N]> for ZBytes { } } -impl Deserialize<'_, [u8; N]> for ZSerde { +impl Serialize<&mut [u8; N]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8; N]) -> Self::Output { + ZBytes::new(*t) + } +} + +impl From<&mut [u8; N]> for ZBytes { + fn from(t: &mut [u8; N]) -> Self { + ZSerde.serialize(*t) + } +} + +impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<[u8; N], Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -441,6 +587,14 @@ impl TryFrom<&ZBytes> for [u8; N] { } } +impl TryFrom<&mut ZBytes> for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Vec impl Serialize> for ZSerde { type Output = ZBytes; @@ -470,10 +624,25 @@ impl From<&Vec> for ZBytes { } } -impl Deserialize<'_, Vec> for ZSerde { +impl Serialize<&mut Vec> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Vec) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut Vec> for ZBytes { + fn from(t: &mut Vec) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Vec> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } @@ -490,6 +659,12 @@ impl From<&ZBytes> for Vec { } } 
+impl From<&mut ZBytes> for Vec { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // &[u8] impl Serialize<&[u8]> for ZSerde { type Output = ZBytes; @@ -505,6 +680,20 @@ impl From<&[u8]> for ZBytes { } } +impl Serialize<&mut [u8]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8]) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut [u8]> for ZBytes { + fn from(t: &mut [u8]) -> Self { + ZSerde.serialize(t) + } +} + // Cow<[u8]> impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -534,10 +723,25 @@ impl From<&Cow<'_, [u8]>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut Cow<'_, [u8]>> for ZBytes { + fn from(t: &mut Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous()) } } @@ -557,6 +761,12 @@ impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { } } +impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // String impl Serialize for ZSerde { type Output = ZBytes; @@ -586,10 +796,25 @@ impl From<&String> for ZBytes { } } -impl Deserialize<'_, String> for ZSerde { +impl Serialize<&mut String> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut String) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut String> for ZBytes { + fn from(t: &mut String) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, String> for ZSerde { + type Input = &'a ZBytes; type Error = FromUtf8Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn 
deserialize(self, v: Self::Input) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } @@ -611,12 +836,20 @@ impl TryFrom<&ZBytes> for String { } } +impl TryFrom<&mut ZBytes> for String { + type Error = FromUtf8Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // &str impl Serialize<&str> for ZSerde { type Output = ZBytes; fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -626,6 +859,20 @@ impl From<&str> for ZBytes { } } +impl Serialize<&mut str> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut str) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut str> for ZBytes { + fn from(t: &mut str) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -644,7 +891,7 @@ impl<'a> Serialize<&Cow<'a, str>> for ZSerde { type Output = ZBytes; fn serialize(self, s: &Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -654,10 +901,25 @@ impl From<&Cow<'_, str>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Cow<'_, str>> for ZBytes { + fn from(t: &mut Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { + type Input = &'a ZBytes; type Error = Utf8Error; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Cow::try_from(v) } } @@ -686,6 +948,18 @@ impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { } } +impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { + type Error = Utf8Error; + + fn try_from(v: &'a mut ZBytes) -> Result { + let v: Cow<'a, [u8]> = Cow::from(v); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str 
is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. + Ok(unsafe { core::mem::transmute(v) }) + } +} + // - Integers impl macro_rules! impl_int { ($t:ty) => { @@ -725,10 +999,25 @@ macro_rules! impl_int { } } + impl Serialize<&mut $t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl From<&mut $t> for ZBytes { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) + } + } + impl<'a> Deserialize<'a, $t> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<$t, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -758,6 +1047,14 @@ macro_rules! impl_int { ZSerde.deserialize(value) } } + + impl TryFrom<&mut ZBytes> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } + } }; } @@ -810,10 +1107,25 @@ impl From<&bool> for ZBytes { } } -impl Deserialize<'_, bool> for ZSerde { +impl Serialize<&mut bool> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut bool> for ZBytes { + fn from(t: &mut bool) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, bool> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -839,6 +1151,14 @@ impl TryFrom<&ZBytes> for bool { } } +impl TryFrom<&mut ZBytes> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // - Zenoh advanced types encoders/decoders // Properties impl Serialize> for 
ZSerde { @@ -869,10 +1189,25 @@ impl<'s> From<&'s Properties<'s>> for ZBytes { } } +impl Serialize<&mut Properties<'_>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s mut Properties<'s>> for ZBytes { + fn from(t: &'s mut Properties<'s>) -> Self { + ZSerde.serialize(&*t) + } +} + impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { + type Input = &'s ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &'s ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; @@ -897,6 +1232,14 @@ impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { } } +impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; @@ -932,10 +1275,29 @@ impl TryFrom<&serde_json::Value> for ZBytes { } } -impl Deserialize<'_, serde_json::Value> for ZSerde { +impl Serialize<&mut serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_json::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_json::Value> for ZBytes { type Error = serde_json::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_json::Value) -> Result { + ZSerde.serialize(&*value) + } +} + +impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_json::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_json::from_reader(v.reader()) } } @@ -956,6 +1318,14 @@ impl TryFrom<&ZBytes> for serde_json::Value { } } +impl TryFrom<&mut ZBytes> for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: 
&mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Yaml impl Serialize for ZSerde { type Output = Result; @@ -991,10 +1361,29 @@ impl TryFrom<&serde_yaml::Value> for ZBytes { } } -impl Deserialize<'_, serde_yaml::Value> for ZSerde { +impl Serialize<&mut serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_yaml::Value> for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: &mut serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_yaml::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_yaml::from_reader(v.reader()) } } @@ -1015,6 +1404,14 @@ impl TryFrom<&ZBytes> for serde_yaml::Value { } } +impl TryFrom<&mut ZBytes> for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // CBOR impl Serialize for ZSerde { type Output = Result; @@ -1050,10 +1447,27 @@ impl TryFrom<&serde_cbor::Value> for ZBytes { } } -impl Deserialize<'_, serde_cbor::Value> for ZSerde { +impl Serialize<&mut serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_cbor::Value> for ZBytes { + type Error = serde_cbor::Error; + + fn try_from(value: &mut serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_cbor::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_cbor::from_reader(v.reader()) } } @@ -1074,6 +1488,14 @@ impl 
TryFrom<&ZBytes> for serde_cbor::Value { } } +impl TryFrom<&mut ZBytes> for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Pickle impl Serialize for ZSerde { type Output = Result; @@ -1113,10 +1535,27 @@ impl TryFrom<&serde_pickle::Value> for ZBytes { } } -impl Deserialize<'_, serde_pickle::Value> for ZSerde { +impl Serialize<&mut serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_pickle::Value> for ZBytes { type Error = serde_pickle::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_pickle::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } @@ -1137,77 +1576,125 @@ impl TryFrom<&ZBytes> for serde_pickle::Value { } } +impl TryFrom<&mut ZBytes> for serde_pickle::Value { + type Error = serde_pickle::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Shared memory conversion -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Arc) -> Self::Output { - ZBytes::new(t) + fn serialize(self, t: ZSliceShm) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Arc) -> Self { + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShm) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize> 
for ZSerde { +// Shared memory conversion +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Box) -> Self::Output { - let smb: Arc = t.into(); - Self.serialize(smb) + fn serialize(self, t: ZSliceShmMut) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Box) -> Self { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShmMut) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize for ZSerde { - type Output = ZBytes; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { + type Input = &'a ZBytes; + type Error = ZDeserializeError; - fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - ZBytes::new(t) + fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { + // A ZSliceShm is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.into()); + } + } + Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl From for ZBytes { - fn from(t: SharedMemoryBuf) -> Self { - ZSerde.serialize(t) +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { + type Error = ZDeserializeError; + + fn try_from(value: &'a ZBytes) -> Result { + ZSerde.deserialize(value) } } -#[cfg(feature = "shared-memory")] -impl Deserialize<'_, SharedMemoryBuf> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { - // A SharedMemoryBuf is expected to have only one slice - let mut zslices = v.0.zslices(); + fn try_from(value: &'a 
mut ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { + type Input = &'a mut ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.clone()); + if let Some(shmb) = zs.downcast_mut::() { + return Ok(shmb.into()); } } Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { + type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_mut::() { + return shmb.try_into().map_err(|_| ZDeserializeError); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { + type Error = ZDeserializeError; + + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) } } @@ -1267,16 +1754,17 @@ where } } -impl Deserialize<'_, (A, B)> for ZSerde +impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde where for<'a> A: TryFrom<&'a ZBytes>, for<'a> >::Error: Debug, for<'b> B: TryFrom<&'b ZBytes>, for<'b> >::Error: Debug, { + type Input = &'s ZBytes; type Error = ZError; - fn deserialize(self, bytes: &ZBytes) -> Result<(A, B), Self::Error> { + fn deserialize(self, bytes: Self::Input) -> Result<(A, B), 
Self::Error> { let codec = Zenoh080::new(); let mut reader = bytes.0.reader(); @@ -1320,6 +1808,20 @@ where } } +impl TryFrom<&mut ZBytes> for (A, B) +where + for<'a> A: TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, +{ + type Error = ZError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -1361,6 +1863,13 @@ impl From<&ZBytes> for StringOrBase64 { } } +impl From<&mut ZBytes> for StringOrBase64 { + fn from(v: &mut ZBytes) -> Self { + StringOrBase64::from(&*v) + } +} + +// Protocol attachment extension impl From for AttachmentType { fn from(this: ZBytes) -> Self { AttachmentType { @@ -1384,6 +1893,16 @@ mod tests { use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::Properties; + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + use zenoh_shm::api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + slice::zsliceshm::{zsliceshm, ZSliceShm}, + }; + const NUM: usize = 1_000; macro_rules! 
serialize_deserialize { @@ -1399,81 +1918,118 @@ mod tests { }; } - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn numeric() { + let mut rng = rand::thread_rng(); + + // unsigned integer + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } - // signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - 
serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } + // signed integer + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); + // float + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, rng.gen::()); + } + } + numeric(); + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn basic() { + // String + serialize_deserialize!(String, ""); + serialize_deserialize!(String, String::from("abcdef")); + + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); + + // Vec + serialize_deserialize!(Vec, 
vec![0u8; 0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + + // ZBuf + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } + basic(); + + // SHM + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + { + // create an SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer + let mutable_shm_buf = layout.alloc().res().unwrap(); + + // convert to immutable SHM buffer + let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + + serialize_deserialize!(&zsliceshm, immutable_shm_buf); } - - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); // Properties serialize_deserialize!(Properties, Properties::from("")); diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs index 81dfb04752..2b21765d38 100644 --- a/zenoh/src/encoding.rs +++ b/zenoh/src/encoding.rs @@ -17,7 +17,7 @@ use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; 
use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] -use ::{std::sync::Arc, zenoh_shm::SharedMemoryBuf}; +use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; /// Default encoding values used by Zenoh. /// @@ -835,16 +835,10 @@ impl EncodingMapping for serde_pickle::Value { // - Zenoh SHM #[cfg(feature = "shared-memory")] -impl EncodingMapping for Arc { +impl EncodingMapping for ZSliceShm { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } - -#[cfg(feature = "shared-memory")] -impl EncodingMapping for Box { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - #[cfg(feature = "shared-memory")] -impl EncodingMapping for SharedMemoryBuf { +impl EncodingMapping for ZSliceShmMut { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 7e25375d64..ac7d8b3059 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -88,10 +88,16 @@ use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; use std::future::Ready; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; pub use zenoh_macros::{ke, kedefine, keformat, kewrite}; use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::{zerror, ZResult}; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +pub use zenoh_shm::api as shm; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_util::concat_enabled_features; /// A zenoh error. @@ -148,8 +154,6 @@ pub mod queryable; pub mod sample; pub mod subscriber; pub mod value; -#[cfg(feature = "shared-memory")] -pub use zenoh_shm as shm; /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. 
@@ -252,7 +256,11 @@ where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - OpenBuilder { config } + OpenBuilder { + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: None, + } } /// A builder returned by [`open`] used to open a zenoh [`Session`]. @@ -273,6 +281,20 @@ where >::Error: std::fmt::Debug, { config: TryIntoConfig, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: Option>, +} + +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +impl OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } } impl Resolvable for OpenBuilder @@ -293,7 +315,12 @@ where .config .try_into() .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() + Session::new( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + self.shm_clients, + ) + .res_sync() } } diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 68e121847d..ee58bc5b5d 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -40,6 +40,10 @@ use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; use zenoh_result::{bail, ZResult}; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use zenoh_shm::api::client_storage::SharedMemoryClientStorage; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use zenoh_shm::reader::SharedMemoryReader; use zenoh_sync::get_mut_unchecked; use zenoh_task::TaskController; use zenoh_transport::{ @@ -47,6 +51,33 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +#[derive(Default)] +pub struct RuntimeBuilder { + 
#[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: Option>, +} + +impl RuntimeBuilder { + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + pub fn shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } + + pub async fn build(self, config: Config) -> ZResult { + let mut runtime = Runtime::init( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + self.shm_clients, + ) + .await?; + match runtime.start().await { + Ok(()) => Ok(runtime), + Err(err) => Err(err), + } + } +} + struct RuntimeState { zid: ZenohId, whatami: WhatAmI, @@ -89,14 +120,19 @@ impl PluginStartArgs for Runtime {} impl Runtime { pub async fn new(config: Config) -> ZResult { - let mut runtime = Runtime::init(config).await?; - match runtime.start().await { - Ok(()) => Ok(runtime), - Err(err) => Err(err), - } + Self::builder().build(config).await + } + + pub fn builder() -> RuntimeBuilder { + RuntimeBuilder::default() } - pub(crate) async fn init(config: Config) -> ZResult { + pub(crate) async fn init( + config: Config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< + Arc, + >, + ) -> ZResult { tracing::debug!("Zenoh Rust API {}", GIT_VERSION); let zid = *config.id(); @@ -118,8 +154,18 @@ impl Runtime { .from_config(&config) .await? .whatami(whatami) - .zid(zid) - .build(handler.clone())?; + .zid(zid); + + #[cfg(feature = "unstable")] + let transport_manager = zcondfeat!( + "shared-memory", + transport_manager.shm_reader(shm_clients.map(SharedMemoryReader::new)), + transport_manager + ) + .build(handler.clone())?; + + #[cfg(not(feature = "unstable"))] + let transport_manager = transport_manager.build(handler.clone())?; let config = Notifier::new(config); diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index c59cca8b9e..a2ea5a768b 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -135,9 +135,9 @@ impl

ValueBuilderTrait for PublicationBuilder { } } - fn payload(self, payload: IntoPayload) -> Self + fn payload(self, payload: IntoZBytes) -> Self where - IntoPayload: Into, + IntoZBytes: Into, { Self { kind: PublicationBuilderPut { @@ -418,9 +418,9 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> + pub fn put(&self, payload: IntoZBytes) -> PublisherPutBuilder<'_> where - IntoPayload: Into, + IntoZBytes: Into, { PublicationBuilder { publisher: self, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 563df461b8..d5d4de5d0b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -142,15 +142,15 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply<'b, TryIntoKeyExpr, IntoPayload>( + pub fn reply<'b, TryIntoKeyExpr, IntoZBytes>( &self, key_expr: TryIntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, ) -> ReplyPutBuilder<'_, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoZBytes: Into, { ReplyBuilder { query: self, @@ -531,6 +531,8 @@ impl SyncResolve for ReplyErrBuilder<'_> { payload: ResponseBody::Err(zenoh::Err { encoding: self.value.encoding.into(), ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, ext_unknown: vec![], payload: self.value.payload.into(), }), diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index cab5c2333a..4b2f0d751d 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -76,13 +76,13 @@ pub struct SampleBuilder { } impl SampleBuilder { - pub fn put( + pub fn put( key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, ) -> SampleBuilder where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoZBytes: Into, { Self { sample: Sample { diff --git 
a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 6078a5a350..b5dbd727ec 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -59,15 +59,15 @@ pub(crate) struct DataInfo { } pub(crate) trait DataInfoIntoSample { - fn into_sample( + fn into_sample( self, key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into; + IntoZBytes: Into; } impl DataInfoIntoSample for DataInfo { @@ -76,15 +76,15 @@ impl DataInfoIntoSample for DataInfo { // The test for it is intentionally not added to avoid inserting extra "if" into hot path. // The correctness of the data should be ensured by the caller. #[inline] - fn into_sample( + fn into_sample( self, key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoZBytes: Into, { Sample { key_expr: key_expr.into(), @@ -106,15 +106,15 @@ impl DataInfoIntoSample for DataInfo { impl DataInfoIntoSample for Option { #[inline] - fn into_sample( + fn into_sample( self, key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoZBytes: Into, { if let Some(data_info) = self { data_info.into_sample( diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 4a6a312dcf..465f03bf14 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -80,6 +80,8 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; use zenoh_util::core::AsyncResolve; @@ -705,15 +707,15 @@ impl Session { /// # } /// ``` #[inline] - pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoPayload>( + pub fn put<'a, 'b: 'a, TryIntoKeyExpr, 
IntoZBytes>( &'a self, key_expr: TryIntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoZBytes: Into, { PublicationBuilder { publisher: self.declare_publisher(key_expr), @@ -832,12 +834,23 @@ impl Session { } #[allow(clippy::new_ret_no_self)] - pub(super) fn new(config: Config) -> impl Resolve> { + pub(super) fn new( + config: Config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< + Arc, + >, + ) -> impl Resolve> { ResolveFuture::new(async move { tracing::debug!("Config: {:?}", &config); let aggregated_subscribers = config.aggregation().subscribers().clone(); let aggregated_publishers = config.aggregation().publishers().clone(); - match Runtime::init(config).await { + match Runtime::init( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients, + ) + .await + { Ok(mut runtime) => { let mut session = Self::init( runtime.clone(), diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 26165334eb..3360d95c96 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -15,7 +15,7 @@ //! Value primitives. use crate::{bytes::ZBytes, encoding::Encoding}; -/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. +/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the payload's [`ZBytes`] should be interpreted. #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { @@ -24,7 +24,7 @@ pub struct Value { } impl Value { - /// Creates a new [`Value`] with specified [`Payload`] and [`Encoding`]. + /// Creates a new [`Value`] with specified [`ZBytes`] and [`Encoding`]. pub fn new(payload: T, encoding: E) -> Self where T: Into, @@ -48,7 +48,7 @@ impl Value { self.payload.is_empty() && self.encoding == Encoding::default() } - /// Gets binary [`Payload`] of this [`Value`]. 
+ /// Gets binary [`ZBytes`] of this [`Value`]. pub fn payload(&self) -> &ZBytes { &self.payload } diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 9c807bd121..9fd00788f4 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -42,16 +42,14 @@ async fn close_session(session: Session) { async fn zenoh_events() { let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; let zid = session.zid(); - let sub1 = session + let sub1 = ztimeout!(session .declare_subscriber(format!("@/session/{zid}/transport/unicast/*")) - .res() - .await - .unwrap(); - let sub2 = session + .res()) + .unwrap(); + let sub2 = ztimeout!(session .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res() - .await - .unwrap(); + .res()) + .unwrap(); let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await; let zid2 = session2.zid(); @@ -104,7 +102,7 @@ async fn zenoh_events() { assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); - sub2.undeclare().res().await.unwrap(); - sub1.undeclare().res().await.unwrap(); + ztimeout!(sub2.undeclare().res()).unwrap(); + ztimeout!(sub1.undeclare().res()).unwrap(); close_session(session).await; } diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs new file mode 100644 index 0000000000..d9910bedf5 --- /dev/null +++ b/zenoh/tests/payload.rs @@ -0,0 +1,97 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +#[test] +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +fn shm_payload_single_buf() { + use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; + use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; + use zenoh::{ + bytes::ZBytes, + shm::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + }, + }; + + // create an SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer + let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + + // get data + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // convert into immutable owned buffer + let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + + // get data + let _data: &[u8] = &owned_shm_buf; + + // convert again into mutable owned buffer + let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + + // get data + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // build a ZBytes from an SHM buffer + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + { + // deserialize ZBytes as borrowed zsliceshm + let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + + // get data + let _data: &[u8] = borrowed_shm_buf; + + // construct owned buffer from borrowed type + let owned = borrowed_shm_buf.to_owned(); + + // get data + let _data: &[u8] = &owned; + } + + 
{ + // deserialize ZBytes as mutably borrowed zsliceshm + let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + + // get data + let _data: &[u8] = borrowed_shm_buf; + + // convert zsliceshm to zsliceshmmut + let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + + // get data + let _data: &[u8] = borrowed_shm_buf_mut; + let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + } +} diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index d80e9bd8d9..b6a0e9d226 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -127,14 +127,14 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. Self::Queryable(ke, payload_size) => { - let queryable = session.declare_queryable(ke).res_async().await?; + let queryable = ztimeout!(session.declare_queryable(ke).res_async())?; let payload = vec![0u8; *payload_size]; loop { tokio::select! { _ = token.cancelled() => break, query = queryable.recv_async() => { - query?.reply(ke.to_owned(), payload.clone()).res_async().await?; + ztimeout!(query?.reply(ke.to_owned(), payload.clone()).res_async())?; }, } } @@ -279,7 +279,7 @@ impl Recipe { // In case of client can't connect to some peers/routers loop { - if let Ok(session) = zenoh::open(config.clone()).res_async().await { + if let Ok(session) = ztimeout!(zenoh::open(config.clone()).res_async()) { break session.into_arc(); } else { tokio::time::sleep(Duration::from_secs(1)).await; @@ -315,11 +315,7 @@ impl Recipe { // node_task_tracker.wait().await; // Close the session once all the task assoicated with the node are done. 
- Arc::try_unwrap(session) - .unwrap() - .close() - .res_async() - .await?; + ztimeout!(Arc::try_unwrap(session).unwrap().close().res_async())?; println!("Node: {} is closed.", &node.name); Result::Ok(()) diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs new file mode 100644 index 0000000000..2a9685eb36 --- /dev/null +++ b/zenoh/tests/shm.rs @@ -0,0 +1,204 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +mod tests { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + use std::time::Duration; + use zenoh::prelude::r#async::*; + use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; + use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; + use zenoh::shm::provider::shared_memory_provider::{ + BlockOn, GarbageCollect, SharedMemoryProviderBuilder, + }; + use zenoh_core::ztimeout; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + + const MSG_COUNT: usize = 1_00; + const MSG_SIZE: [usize; 2] = [1_024, 100_000]; + + async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][01a] Opening peer01 session: {:?}", endpoints); + let peer01 = 
ztimeout!(zenoh::open(config).res_async()).unwrap(); + + let mut config = config::peer(); + config.connect.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][02a] Opening peer02 session: {:?}", endpoints); + let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + (peer01, peer02) + } + + async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(true)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][01a] Opening peer01 session: {}", endpoint01); + let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + let mut config = config::peer(); + config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(true)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][02a] Opening peer02 session: {}", endpoint02); + let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + (peer01, peer02) + } + + async fn close_session(peer01: Session, peer02: Session) { + println!("[ ][01d] Closing peer02 session"); + ztimeout!(peer01.close().res_async()).unwrap(); + println!("[ ][02d] Closing peer02 session"); + ztimeout!(peer02.close().res_async()).unwrap(); + } + + async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { + let msg_count = match reliability { + Reliability::Reliable => MSG_COUNT, + Reliability::BestEffort => 1, + }; + let msgs = Arc::new(AtomicUsize::new(0)); + + for size in MSG_SIZE { + let key_expr = format!("shm{size}"); + + msgs.store(0, Ordering::SeqCst); + + // Subscribe to data + println!("[PS][01b] Subscribing on 
peer01 session"); + let c_msgs = msgs.clone(); + let _sub = ztimeout!(peer01 + .declare_subscriber(&key_expr) + .callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs.fetch_add(1, Ordering::Relaxed); + }) + .res_async()) + .unwrap(); + + // Wait for the declaration to propagate + tokio::time::sleep(SLEEP).await; + + // create SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size * MSG_COUNT / 10) + .unwrap() + .res() + .unwrap(); + // ...and SHM provider + let shm01 = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // remember segment size that was allocated + let shm_segment_size = shm01.available(); + + // Prepare a layout for allocations + let layout = shm01.alloc_layout().size(size).res().unwrap(); + + // Put data + println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); + for c in 0..msg_count { + // Allocate new message + let sbuf = ztimeout!(layout + .alloc() + .with_policy::>() + .res_async()) + .unwrap(); + println!("{c} created"); + + // Publish this message + ztimeout!(peer02 + .put(&key_expr, sbuf) + .congestion_control(CongestionControl::Block) + .res_async()) + .unwrap(); + println!("{c} putted"); + } + + // wat for all messages received + ztimeout!(async { + loop { + let cnt = msgs.load(Ordering::Relaxed); + println!("[PS][03b] Received {cnt}/{msg_count}."); + if cnt != msg_count { + tokio::time::sleep(SLEEP).await; + } else { + break; + } + } + }); + + // wat for all memory reclaimed + ztimeout!(async { + loop { + shm01.garbage_collect(); + let available = shm01.available(); + println!("[PS][03b] SHM available {available}/{shm_segment_size}"); + if available != shm_segment_size { + tokio::time::sleep(SLEEP).await; + } else { + break; + } + } + }); + } + } + + #[cfg(feature = "shared-memory")] + #[test] + fn zenoh_shm_unicast() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Initiate logging + 
zenoh_util::try_init_log_from_env(); + + let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; + test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; + close_session(peer01, peer02).await; + }); + } + + #[cfg(feature = "shared-memory")] + #[test] + fn zenoh_shm_multicast() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Initiate logging + zenoh_util::try_init_log_from_env(); + + let (peer01, peer02) = + open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; + test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; + close_session(peer01, peer02).await; + }); + } +} From e2279d85192e8229d488380c3fb960e9a77d98a9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 22 Apr 2024 16:59:09 +0200 Subject: [PATCH 277/598] Remove experimental code from bytes --- zenoh/src/bytes.rs | 66 ++-------------------------------------------- 1 file changed, 2 insertions(+), 64 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 036271b765..fb4e3a19e9 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -15,14 +15,8 @@ //! ZBytes primitives. 
use crate::buffers::ZBuf; use std::{ - borrow::Cow, - convert::Infallible, - fmt::Debug, - marker::PhantomData, - ops::{Add, AddAssign, Deref}, - str::Utf8Error, - string::FromUtf8Error, - sync::Arc, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, + string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ @@ -43,62 +37,6 @@ use zenoh_shm::{ SharedMemoryBuf, }; -pub enum Cipher { - Aes512(ZSlice), -} - -pub enum Compression { - LZ4, -} - -#[derive(Default)] -pub struct Transformation { - cipher: Option, - compression: Option, -} - -impl From for Transformation { - fn from(value: Cipher) -> Self { - Transformation { - cipher: Some(value), - ..Default::default() - } - } -} - -impl From for Transformation { - fn from(value: Compression) -> Self { - Transformation { - compression: Some(value), - ..Default::default() - } - } -} - -impl Add for Transformation { - type Output = Transformation; - - fn add(mut self, rhs: Self) -> Self::Output { - self += rhs; - self - } -} - -impl AddAssign for Transformation { - fn add_assign(&mut self, rhs: Transformation) { - fn combine(mut lhs: Option, mut rhs: Option) -> Option { - match (lhs.take(), rhs.take()) { - (Some(_), Some(r)) => Some(r), - (None, r) => r, - (l, None) => l, - } - } - - self.cipher = combine(self.cipher.take(), rhs.cipher); - self.compression = combine(self.compression.take(), rhs.compression); - } -} - /// Trait to encode a type `T` into a [`Value`]. 
pub trait Serialize { type Output; From f98436abb66febfb342823876973322106c5ded1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 22 Apr 2024 18:09:35 +0200 Subject: [PATCH 278/598] merge protocol changes followup --- zenoh/src/api/scouting.rs | 2 ++ zenoh/src/api/session.rs | 27 +++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 2b0022f242..a3c86655b6 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -351,6 +351,7 @@ fn _scout( /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; +/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .res() @@ -376,3 +377,4 @@ where handler: DefaultHandler::default(), } } + diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index d00ae0a532..c481b01bdf 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2664,7 +2664,11 @@ where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - OpenBuilder { config } + OpenBuilder { + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: None, + } } /// A builder returned by [`open`] used to open a zenoh [`Session`]. 
@@ -2685,6 +2689,20 @@ where >::Error: std::fmt::Debug, { config: TryIntoConfig, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: Option>, +} + +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +impl OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } } impl Resolvable for OpenBuilder @@ -2705,7 +2723,12 @@ where .config .try_into() .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() + Session::new( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + self.shm_clients, + ) + .res_sync() } } From 420e38b1335840c35662ed01bf91ffec9ddbdc46 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 09:36:14 +0200 Subject: [PATCH 279/598] build fixes --- zenoh/src/api/loader.rs | 2 +- zenoh/src/api/mod.rs | 3 +++ zenoh/src/lib.rs | 3 ++- zenoh/src/net/runtime/adminspace.rs | 13 ++++--------- zenoh/src/net/runtime/mod.rs | 9 +++++---- zenohd/src/main.rs | 7 +++---- 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/zenoh/src/api/loader.rs b/zenoh/src/api/loader.rs index 084bae82b7..e4a28de02e 100644 --- a/zenoh/src/api/loader.rs +++ b/zenoh/src/api/loader.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::sealed::{PluginsManager, PLUGIN_PREFIX}; +use super::plugins::{PluginsManager, PLUGIN_PREFIX}; use crate::runtime::Runtime; use zenoh_config::{Config, PluginLoad}; use zenoh_result::ZResult; diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index c2cc3504f0..e93a5e025c 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -23,6 +23,7 @@ pub(crate) mod info; pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; +#[cfg(all(feature = "unstable", feature = "plugins"))] pub(crate) mod plugins; pub(crate) mod 
publication; pub(crate) mod query; @@ -34,3 +35,5 @@ pub(crate) mod session; pub(crate) mod subscriber; pub(crate) mod time; pub(crate) mod value; +#[cfg(all(feature = "unstable", feature = "plugins"))] +pub(crate) mod loader; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index de57f195e6..bf6675f63b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -82,7 +82,6 @@ extern crate zenoh_result; mod api; mod net; - #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use zenoh_shm::api as shm; #[cfg(all(feature = "unstable", feature = "shared-memory"))] @@ -329,6 +328,7 @@ pub mod time { /// This operation is used by the plugins to share the same Runtime as the router. #[doc(hidden)] pub mod runtime { + pub use crate::net::runtime::RuntimeBuilder; pub use crate::net::runtime::{AdminSpace, Runtime}; pub use zenoh_runtime::ZRuntime; } @@ -343,6 +343,7 @@ pub mod config { } #[doc(hidden)] +#[cfg(all(feature = "unstable", feature = "plugins"))] pub mod plugins { pub use crate::api::plugins::PluginsManager; pub use crate::api::plugins::Response; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 64c1a4cae1..b35d81a81a 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -15,19 +15,13 @@ use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; use crate::api::bytes::ZBytes; use crate::api::key_expr::KeyExpr; -use crate::api::plugins; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::plugins::PluginsManager; use crate::api::queryable::Query; use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::plugins::sealed::{self as plugins}; -use crate::prelude::sync::SyncResolve; -use crate::queryable::Query; -use crate::queryable::QueryInner; -use crate::sample::builder::ValueBuilderTrait; -use 
crate::value::Value; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; @@ -37,6 +31,7 @@ use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_core::SyncResolve; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] @@ -109,7 +104,7 @@ impl ConfigValidator for AdminSpace { impl AdminSpace { #[cfg(all(feature = "unstable", feature = "plugins"))] fn start_plugin( - plugin_mgr: &mut plugins::PluginsManager, + plugin_mgr: &mut PluginsManager, config: &zenoh_config::PluginLoad, start_args: &Runtime, required: bool, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 06847b9ceb..456899f918 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -23,9 +23,11 @@ pub mod orchestrator; use super::primitives::DeMux; use super::routing; use super::routing::router::Router; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::loader::{load_plugins, start_plugins}; use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; #[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::plugins::sealed::PluginsManager; +use crate::api::plugins::PluginsManager; use crate::{GIT_VERSION, LONG_VERSION}; pub use adminspace::AdminSpace; use futures::stream::StreamExt; @@ -39,7 +41,6 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; -use zenoh_config::{unwrap_or_default, Config, ModeDependent, Notifier}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; @@ -158,7 +159,7 @@ impl RuntimeBuilder { #[cfg(all(feature = "unstable", feature = "plugins"))] let plugins_manager 
= plugins_manager .take() - .unwrap_or_else(|| crate::plugins::loader::load_plugins(&config)); + .unwrap_or_else(|| load_plugins(&config)); // Admin space creation flag let start_admin_space = *config.adminspace.enabled(); @@ -185,7 +186,7 @@ impl RuntimeBuilder { // Start plugins #[cfg(all(feature = "unstable", feature = "plugins"))] - crate::plugins::loader::start_plugins(&runtime); + start_plugins(&runtime); // Start notifier task let receiver = config.subscribe(); diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 9480f99223..3629e4dae4 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -18,11 +18,10 @@ use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use zenoh::config::EndPoint; -use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; +use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; use zenoh::core::Result; -use zenoh::plugins::PluginsManager; -use zenoh::runtime::{AdminSpace, Runtime}; use zenoh::scouting::WhatAmI; +use zenoh::core::AsyncResolve; #[cfg(feature = "loki")] use url::Url; @@ -108,7 +107,7 @@ fn main() { let config = config_from_args(&args); tracing::info!("Initial conf: {}", &config); - let _session = match zenoh::open(config).res().await { + let _session = match zenoh::open(config).res_async().await { Ok(runtime) => runtime, Err(e) => { println!("{e}. 
Exiting..."); From a9a906f7bcad34b2f62353ef5aee2c210e2748e7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 09:44:02 +0200 Subject: [PATCH 280/598] cargo fmt --- zenoh/src/api/mod.rs | 4 ++-- zenoh/src/api/scouting.rs | 1 - zenoh/src/net/runtime/mod.rs | 2 +- zenohd/src/main.rs | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index e93a5e025c..694890ad6c 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -24,6 +24,8 @@ pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; #[cfg(all(feature = "unstable", feature = "plugins"))] +pub(crate) mod loader; +#[cfg(all(feature = "unstable", feature = "plugins"))] pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; @@ -35,5 +37,3 @@ pub(crate) mod session; pub(crate) mod subscriber; pub(crate) mod time; pub(crate) mod value; -#[cfg(all(feature = "unstable", feature = "plugins"))] -pub(crate) mod loader; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index a3c86655b6..c4e411dec9 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -377,4 +377,3 @@ where handler: DefaultHandler::default(), } } - diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 456899f918..4991844650 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -25,9 +25,9 @@ use super::routing; use super::routing::router::Router; #[cfg(all(feature = "unstable", feature = "plugins"))] use crate::api::loader::{load_plugins, start_plugins}; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; #[cfg(all(feature = "unstable", feature = "plugins"))] use crate::api::plugins::PluginsManager; +use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::{GIT_VERSION, LONG_VERSION}; pub use adminspace::AdminSpace; use futures::stream::StreamExt; diff --git a/zenohd/src/main.rs 
b/zenohd/src/main.rs index 3629e4dae4..d8fed7eeb4 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -19,9 +19,9 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; +use zenoh::core::AsyncResolve; use zenoh::core::Result; use zenoh::scouting::WhatAmI; -use zenoh::core::AsyncResolve; #[cfg(feature = "loki")] use url::Url; From 618bed1ff9c2ef17cf73860182b6849b68f06fb2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 09:57:24 +0200 Subject: [PATCH 281/598] restored zenoh-macro for zenoh-ext --- Cargo.lock | 1 + zenoh-ext/Cargo.toml | 1 + zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 4 ++-- zenoh/src/lib.rs | 1 - 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4244273f87..737cb62f75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5249,6 +5249,7 @@ dependencies = [ "tokio", "tracing", "zenoh", + "zenoh-macros", "zenoh-util", ] diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 61b0bf13df..402d37e5f4 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -51,6 +51,7 @@ serde = { workspace = true, features = ["default"] } serde_cbor = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } +zenoh-macros = { workspace = true } [package.metadata.docs.rs] features = ["unstable"] diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index b6a380d766..7080b44ac4 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -66,7 +66,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { /// Restrict the matching queries that will be receive by this [`PublicationCache`]'s queryable /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). 
- #[zenoh::internal::unstable] + #[zenoh_macros::unstable] #[inline] pub fn queryable_allowed_origin(mut self, origin: Locality) -> Self { self.queryable_origin = Some(origin); diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 1bce18a64f..35eb9afe46 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -163,7 +163,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle /// Restrict the matching publications that will be receive by this [`Subscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). - #[zenoh::internal::unstable] + #[zenoh_macros::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; @@ -523,7 +523,7 @@ where /// Restrict the matching publications that will be receive by this [`FetchingSubscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). - #[zenoh::internal::unstable] + #[zenoh_macros::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index bf6675f63b..47c95f2d52 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -357,7 +357,6 @@ pub mod internal { pub use zenoh_core::zerror; pub use zenoh_core::zlock; pub use zenoh_core::ztimeout; - pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_task::TaskController; From d10308568c7fb0444ea3412259bf344cafd0f8dc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 11:51:10 +0200 Subject: [PATCH 282/598] shm clippy still fails --- zenoh/src/lib.rs | 21 ++++++++++++++------- zenoh/tests/payload.rs | 13 +------------ zenoh/tests/shm.rs | 7 +------ 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 47c95f2d52..c4f671ec79 100644 --- a/zenoh/src/lib.rs +++ 
b/zenoh/src/lib.rs @@ -82,11 +82,6 @@ extern crate zenoh_result; mod api; mod net; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] -pub use zenoh_shm::api as shm; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] -pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; - lazy_static::lazy_static!( static ref LONG_VERSION: String = format!("{} built with {}", GIT_VERSION, env!("RUSTC_VERSION")); ); @@ -366,7 +361,19 @@ pub mod internal { pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; } -#[cfg(feature = "shared-memory")] +#[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::SharedMemoryManager; + pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; + pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; + pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; + pub use zenoh_shm::api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + }; + pub use zenoh_shm::api::provider::shared_memory_provider::{ + BlockOn, GarbageCollect, + }; } diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs index d9910bedf5..fac5d37367 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/payload.rs @@ -15,18 +15,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_payload_single_buf() { - use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; - use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; - use zenoh::{ - bytes::ZBytes, - shm::{ - protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, - }, - }; + use zenoh::prelude::r#async::*; // create an SHM 
backend... let backend = PosixSharedMemoryProviderBackend::builder() diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 2a9685eb36..92d1b17732 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -17,12 +17,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; - use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; - use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; - use zenoh::shm::provider::shared_memory_provider::{ - BlockOn, GarbageCollect, SharedMemoryProviderBuilder, - }; - use zenoh_core::ztimeout; + use zenoh::internal::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 3d6a6e8c1d0334be8c1d3ef16a87095f6bebdb83 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 23 Apr 2024 14:06:00 +0300 Subject: [PATCH 283/598] [skip ci] SHM Payload API example and test --- examples/Cargo.toml | 7 +- examples/examples/z_payload_shm.rs | 101 +++++++++++++++++++++++++++++ zenoh/tests/payload.rs | 51 +++++---------- 3 files changed, 124 insertions(+), 35 deletions(-) create mode 100644 examples/examples/z_payload_shm.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e117507ae9..b240d06723 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -156,4 +156,9 @@ path = "examples/z_pong.rs" [[example]] name = "z_alloc_shm" path = "examples/z_alloc_shm.rs" -required-features = ["unstable", "shared-memory"] \ No newline at end of file +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_payload_shm" +path = "examples/z_payload_shm.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_payload_shm.rs new file mode 100644 index 0000000000..3b03b80502 --- /dev/null +++ b/examples/examples/z_payload_shm.rs @@ -0,0 +1,101 @@ +// +// Copyright (c) 2023 
ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; +use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; +use zenoh::{ + bytes::ZBytes, + shm::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + }, +}; + +fn main() { + // create an SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer (ZSliceShmMut) + let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) + let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + + // immutable API + let _data: &[u8] = &owned_shm_buf; + + // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) + let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) + let mut payload: 
ZBytes = owned_shm_buf_mut.into(); + + // branch to illustrate immutable access to SHM data + { + // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) + let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) + let owned = borrowed_shm_buf.to_owned(); + + // immutable API + let _data: &[u8] = &owned; + + // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) + } + + // branch to illustrate mutable access to SHM data + { + // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) + let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) + let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = borrowed_shm_buf_mut; + let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + } +} diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs index d9910bedf5..1bcbf33ef4 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/payload.rs @@ -43,55 +43,38 @@ fn shm_payload_single_buf() { // Prepare a layout for allocations let layout = provider.alloc_layout().size(1024).res().unwrap(); - // allocate an SHM buffer - let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + // allocate an SHM buffer (ZSliceShmMut) + let owned_shm_buf_mut = layout.alloc().res().unwrap(); - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - - // convert into immutable owned buffer + // convert into immutable owned buffer 
(ZSliceShmMut -> ZSlceShm) let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); - // get data - let _data: &[u8] = &owned_shm_buf; - - // convert again into mutable owned buffer - let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) + let owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); - // build a ZBytes from an SHM buffer + // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) let mut payload: ZBytes = owned_shm_buf_mut.into(); + // branch to illustrate immutable access to SHM data { - // deserialize ZBytes as borrowed zsliceshm + // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); - // get data - let _data: &[u8] = borrowed_shm_buf; - - // construct owned buffer from borrowed type + // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) let owned = borrowed_shm_buf.to_owned(); - // get data - let _data: &[u8] = &owned; + // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) } + // branch to illustrate mutable access to SHM data { - // deserialize ZBytes as mutably borrowed zsliceshm + // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); - // get data - let _data: &[u8] = borrowed_shm_buf; - - // convert zsliceshm to zsliceshmmut - let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf_mut; - let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + // convert 
zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) + let _borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); } } From d9b65a73eb79230c2fb4020f5c428b2d41d111f9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 23 Apr 2024 14:36:06 +0200 Subject: [PATCH 284/598] Fix tuple deserialization lifetime (#954) --- zenoh/src/bytes.rs | 53 ++++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index fb4e3a19e9..c36136ef81 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -1694,10 +1694,10 @@ where impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Input = &'s ZBytes; type Error = ZError; @@ -1712,18 +1712,18 @@ where let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let bpld = ZBytes::new(bbuf); - let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; + let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: for<'a> TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1734,10 +1734,10 @@ where impl TryFrom<&ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1748,10 +1748,10 
@@ where impl TryFrom<&mut ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1977,6 +1977,14 @@ mod tests { serialize_deserialize!((usize, usize), (0, 1)); serialize_deserialize!((usize, String), (0, String::from("a"))); serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + serialize_deserialize!( + (Cow<'static, [u8]>, Cow<'static, [u8]>), + (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) + ); + serialize_deserialize!( + (Cow<'static, str>, Cow<'static, str>), + (Cow::from("a"), Cow::from("b")) + ); // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; @@ -2061,5 +2069,14 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(String, String)>()); assert_eq!(hm, o); + + let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); + hm.insert(Cow::from("0"), Cow::from("a")); + hm.insert(Cow::from("1"), Cow::from("b")); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from_iter(hm.iter()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); + assert_eq!(hm, o); } } From da5a1a01edaade37228e80342bb69b48ce362735 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 24 Apr 2024 18:39:36 +0200 Subject: [PATCH 285/598] cargo fmt --- zenoh/src/lib.rs | 4 +--- zenoh/tests/shm.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c4f671ec79..60dab218db 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -364,6 +364,7 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; + pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, 
GarbageCollect}; pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ @@ -373,7 +374,4 @@ pub mod shm { }, provider::shared_memory_provider::SharedMemoryProviderBuilder, }; - pub use zenoh_shm::api::provider::shared_memory_provider::{ - BlockOn, GarbageCollect, - }; } diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 92d1b17732..a7bc481e27 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -16,8 +16,8 @@ mod tests { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; - use zenoh::prelude::r#async::*; use zenoh::internal::ztimeout; + use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 6182ae1b710ded6ff837de6c14bac567d25ffee3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 24 Apr 2024 18:54:59 +0200 Subject: [PATCH 286/598] test fix --- .config/nextest.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index b2ed4cde98..4999dce0d3 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -16,7 +16,7 @@ test(=three_node_combination) | test(=watchdog_alloc_concurrent) | test(=header_check_memory_concurrent) | test(=header_link_concurrent) | -test(=header_link_failure_concurrent) +test(=header_link_failure_concurrent) | test(=downsampling_by_keyexpr) """ threads-required = 'num-cpus' From dd6720df4c8a29140baf65f8a603a059dbcc1e2c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 25 Apr 2024 12:30:20 +0200 Subject: [PATCH 287/598] restored lost bytes.rs --- zenoh/src/api/bytes.rs | 817 +++++++++++++++++++++++++++++++++-------- 1 file changed, 664 insertions(+), 153 deletions(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 6f8ba23a65..c36136ef81 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ 
-28,8 +28,14 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh_shm::{ + api::slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + }, + SharedMemoryBuf, +}; /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { @@ -40,10 +46,11 @@ pub trait Serialize { } pub trait Deserialize<'a, T> { + type Input: 'a; type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a ZBytes) -> Result; + fn deserialize(self, t: Self::Input) -> Result; } /// ZBytes contains the serialized bytes of user data. @@ -128,7 +135,18 @@ impl ZBytes { /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize<'a, T>(&'a self) -> ZResult where - ZSerde: Deserialize<'a, T>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, + >::Error: Debug, + { + ZSerde + .deserialize(self) + .map_err(|e| zerror!("{:?}", e).into()) + } + + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, >::Error: Debug, { ZSerde @@ -139,7 +157,16 @@ impl ZBytes { /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn into<'a, T>(&'a self) -> T where - ZSerde: Deserialize<'a, T, Error = Infallible>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, + >::Error: Debug, + { + ZSerde.deserialize(self).unwrap_infallible() + } + + /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
+ pub fn into_mut<'a, T>(&'a mut self) -> T + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, >::Error: Debug, { ZSerde.deserialize(self).unwrap_infallible() @@ -192,7 +219,7 @@ where impl Iterator for ZBytesIterator<'_, T> where - for<'a> ZSerde: Deserialize<'a, T>, + for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, for<'a> >::Error: Debug, { type Item = T; @@ -311,10 +338,25 @@ impl From<&ZBuf> for ZBytes { } } -impl Deserialize<'_, ZBuf> for ZSerde { +impl Serialize<&mut ZBuf> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZBuf) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZBuf> for ZBytes { + fn from(t: &mut ZBuf) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZBuf> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.clone()) } } @@ -331,6 +373,12 @@ impl From<&ZBytes> for ZBuf { } } +impl From<&mut ZBytes> for ZBuf { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // ZSlice impl Serialize for ZSerde { type Output = ZBytes; @@ -360,10 +408,25 @@ impl From<&ZSlice> for ZBytes { } } -impl Deserialize<'_, ZSlice> for ZSerde { +impl Serialize<&mut ZSlice> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZSlice) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZSlice> for ZBytes { + fn from(t: &mut ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZSlice> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.to_zslice()) } } @@ -380,6 +443,12 @@ impl From<&ZBytes> for ZSlice { } } +impl From<&mut ZBytes> for ZSlice { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // [u8; 
N] impl Serialize<[u8; N]> for ZSerde { type Output = ZBytes; @@ -409,10 +478,25 @@ impl From<&[u8; N]> for ZBytes { } } -impl Deserialize<'_, [u8; N]> for ZSerde { +impl Serialize<&mut [u8; N]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8; N]) -> Self::Output { + ZBytes::new(*t) + } +} + +impl From<&mut [u8; N]> for ZBytes { + fn from(t: &mut [u8; N]) -> Self { + ZSerde.serialize(*t) + } +} + +impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<[u8; N], Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -441,6 +525,14 @@ impl TryFrom<&ZBytes> for [u8; N] { } } +impl TryFrom<&mut ZBytes> for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Vec impl Serialize> for ZSerde { type Output = ZBytes; @@ -470,10 +562,25 @@ impl From<&Vec> for ZBytes { } } -impl Deserialize<'_, Vec> for ZSerde { +impl Serialize<&mut Vec> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Vec) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut Vec> for ZBytes { + fn from(t: &mut Vec) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Vec> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } @@ -490,6 +597,12 @@ impl From<&ZBytes> for Vec { } } +impl From<&mut ZBytes> for Vec { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // &[u8] impl Serialize<&[u8]> for ZSerde { type Output = ZBytes; @@ -505,6 +618,20 @@ impl From<&[u8]> for ZBytes { } } +impl Serialize<&mut [u8]> for ZSerde { + type Output = ZBytes; + + fn 
serialize(self, t: &mut [u8]) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut [u8]> for ZBytes { + fn from(t: &mut [u8]) -> Self { + ZSerde.serialize(t) + } +} + // Cow<[u8]> impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -534,10 +661,25 @@ impl From<&Cow<'_, [u8]>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut Cow<'_, [u8]>> for ZBytes { + fn from(t: &mut Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous()) } } @@ -557,6 +699,12 @@ impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { } } +impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // String impl Serialize for ZSerde { type Output = ZBytes; @@ -586,10 +734,25 @@ impl From<&String> for ZBytes { } } -impl Deserialize<'_, String> for ZSerde { +impl Serialize<&mut String> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut String) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut String> for ZBytes { + fn from(t: &mut String) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, String> for ZSerde { + type Input = &'a ZBytes; type Error = FromUtf8Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } @@ -611,12 +774,20 @@ impl TryFrom<&ZBytes> for String { } } +impl TryFrom<&mut ZBytes> for String { + type Error = FromUtf8Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + 
} +} + // &str impl Serialize<&str> for ZSerde { type Output = ZBytes; fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -626,6 +797,20 @@ impl From<&str> for ZBytes { } } +impl Serialize<&mut str> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut str) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut str> for ZBytes { + fn from(t: &mut str) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -644,7 +829,7 @@ impl<'a> Serialize<&Cow<'a, str>> for ZSerde { type Output = ZBytes; fn serialize(self, s: &Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -654,10 +839,25 @@ impl From<&Cow<'_, str>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Cow<'_, str>> for ZBytes { + fn from(t: &mut Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { + type Input = &'a ZBytes; type Error = Utf8Error; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Cow::try_from(v) } } @@ -686,6 +886,18 @@ impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { } } +impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { + type Error = Utf8Error; + + fn try_from(v: &'a mut ZBytes) -> Result { + let v: Cow<'a, [u8]> = Cow::from(v); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. + Ok(unsafe { core::mem::transmute(v) }) + } +} + // - Integers impl macro_rules! impl_int { ($t:ty) => { @@ -725,10 +937,25 @@ macro_rules! 
impl_int { } } + impl Serialize<&mut $t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl From<&mut $t> for ZBytes { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) + } + } + impl<'a> Deserialize<'a, $t> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<$t, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -758,6 +985,14 @@ macro_rules! impl_int { ZSerde.deserialize(value) } } + + impl TryFrom<&mut ZBytes> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } + } }; } @@ -810,10 +1045,25 @@ impl From<&bool> for ZBytes { } } -impl Deserialize<'_, bool> for ZSerde { +impl Serialize<&mut bool> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut bool> for ZBytes { + fn from(t: &mut bool) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, bool> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -839,6 +1089,14 @@ impl TryFrom<&ZBytes> for bool { } } +impl TryFrom<&mut ZBytes> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // - Zenoh advanced types encoders/decoders // Properties impl Serialize> for ZSerde { @@ -869,10 +1127,25 @@ impl<'s> From<&'s Properties<'s>> for ZBytes { } } +impl Serialize<&mut Properties<'_>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s mut 
Properties<'s>> for ZBytes { + fn from(t: &'s mut Properties<'s>) -> Self { + ZSerde.serialize(&*t) + } +} + impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { + type Input = &'s ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &'s ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; @@ -897,6 +1170,14 @@ impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { } } +impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; @@ -932,10 +1213,29 @@ impl TryFrom<&serde_json::Value> for ZBytes { } } -impl Deserialize<'_, serde_json::Value> for ZSerde { +impl Serialize<&mut serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_json::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_json::Value> for ZBytes { type Error = serde_json::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_json::Value) -> Result { + ZSerde.serialize(&*value) + } +} + +impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_json::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_json::from_reader(v.reader()) } } @@ -956,6 +1256,14 @@ impl TryFrom<&ZBytes> for serde_json::Value { } } +impl TryFrom<&mut ZBytes> for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Yaml impl Serialize for ZSerde { type Output = Result; @@ -991,10 +1299,29 @@ impl TryFrom<&serde_yaml::Value> for ZBytes { } } -impl Deserialize<'_, serde_yaml::Value> for ZSerde { +impl Serialize<&mut 
serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_yaml::Value> for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: &mut serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_yaml::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_yaml::from_reader(v.reader()) } } @@ -1015,6 +1342,14 @@ impl TryFrom<&ZBytes> for serde_yaml::Value { } } +impl TryFrom<&mut ZBytes> for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // CBOR impl Serialize for ZSerde { type Output = Result; @@ -1050,10 +1385,27 @@ impl TryFrom<&serde_cbor::Value> for ZBytes { } } -impl Deserialize<'_, serde_cbor::Value> for ZSerde { +impl Serialize<&mut serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_cbor::Value> for ZBytes { type Error = serde_cbor::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_cbor::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_cbor::from_reader(v.reader()) } } @@ -1074,6 +1426,14 @@ impl TryFrom<&ZBytes> for serde_cbor::Value { } } +impl TryFrom<&mut ZBytes> for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Pickle impl Serialize for ZSerde { type Output = 
Result; @@ -1113,10 +1473,27 @@ impl TryFrom<&serde_pickle::Value> for ZBytes { } } -impl Deserialize<'_, serde_pickle::Value> for ZSerde { +impl Serialize<&mut serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_pickle::Value> for ZBytes { + type Error = serde_pickle::Error; + + fn try_from(value: &mut serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_pickle::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } @@ -1137,77 +1514,125 @@ impl TryFrom<&ZBytes> for serde_pickle::Value { } } +impl TryFrom<&mut ZBytes> for serde_pickle::Value { + type Error = serde_pickle::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Shared memory conversion -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Arc) -> Self::Output { - ZBytes::new(t) + fn serialize(self, t: ZSliceShm) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Arc) -> Self { + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShm) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { +// Shared memory conversion +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Box) -> Self::Output { - let smb: Arc = t.into(); - Self.serialize(smb) + fn 
serialize(self, t: ZSliceShmMut) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Box) -> Self { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShmMut) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize for ZSerde { - type Output = ZBytes; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { + type Input = &'a ZBytes; + type Error = ZDeserializeError; - fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - ZBytes::new(t) + fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { + // A ZSliceShm is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.into()); + } + } + Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl From for ZBytes { - fn from(t: SharedMemoryBuf) -> Self { - ZSerde.serialize(t) +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { + type Error = ZDeserializeError; + + fn try_from(value: &'a ZBytes) -> Result { + ZSerde.deserialize(value) } } -#[cfg(feature = "shared-memory")] -impl Deserialize<'_, SharedMemoryBuf> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { - // A SharedMemoryBuf is expected to have only one slice - let mut zslices = v.0.zslices(); + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { + type Input = &'a mut ZBytes; + type Error = ZDeserializeError; + + fn 
deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.clone()); + if let Some(shmb) = zs.downcast_mut::() { + return Ok(shmb.into()); } } Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { + type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_mut::() { + return shmb.try_into().map_err(|_| ZDeserializeError); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { + type Error = ZDeserializeError; + + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) } } @@ -1267,16 +1692,17 @@ where } } -impl Deserialize<'_, (A, B)> for ZSerde +impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { + type Input = &'s ZBytes; type Error = ZError; - fn deserialize(self, bytes: &ZBytes) -> Result<(A, B), Self::Error> { + fn deserialize(self, bytes: Self::Input) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = bytes.0.reader(); @@ -1286,18 +1712,18 @@ where let bbuf: ZBuf = 
codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let bpld = ZBytes::new(bbuf); - let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; + let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: for<'a> TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1308,10 +1734,10 @@ where impl TryFrom<&ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1320,6 +1746,20 @@ where } } +impl TryFrom<&mut ZBytes> for (A, B) +where + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, +{ + type Error = ZError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -1361,6 +1801,13 @@ impl From<&ZBytes> for StringOrBase64 { } } +impl From<&mut ZBytes> for StringOrBase64 { + fn from(v: &mut ZBytes) -> Self { + StringOrBase64::from(&*v) + } +} + +// Protocol attachment extension impl From for AttachmentType { fn from(this: ZBytes) -> Self { AttachmentType { @@ -1384,6 +1831,16 @@ mod tests { use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::Properties; + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + use zenoh_shm::api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + 
protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + slice::zsliceshm::{zsliceshm, ZSliceShm}, + }; + const NUM: usize = 1_000; macro_rules! serialize_deserialize { @@ -1399,81 +1856,118 @@ mod tests { }; } - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn numeric() { + let mut rng = rand::thread_rng(); + + // unsigned integer + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } - // signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, 
i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } + // signed integer + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); + // float + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, rng.gen::()); + } + } + numeric(); + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn basic() { + // String + serialize_deserialize!(String, ""); + serialize_deserialize!(String, 
String::from("abcdef")); + + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); + + // Vec + serialize_deserialize!(Vec, vec![0u8; 0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + + // ZBuf + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } + basic(); + + // SHM + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + { + // create an SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer + let mutable_shm_buf = layout.alloc().res().unwrap(); + + // convert to immutable SHM buffer + let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + + serialize_deserialize!(&zsliceshm, immutable_shm_buf); } - - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); // Properties serialize_deserialize!(Properties, Properties::from("")); @@ -1483,6 +1977,14 @@ mod tests { serialize_deserialize!((usize, usize), (0, 
1)); serialize_deserialize!((usize, String), (0, String::from("a"))); serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + serialize_deserialize!( + (Cow<'static, [u8]>, Cow<'static, [u8]>), + (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) + ); + serialize_deserialize!( + (Cow<'static, str>, Cow<'static, str>), + (Cow::from("a"), Cow::from("b")) + ); // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; @@ -1567,5 +2069,14 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(String, String)>()); assert_eq!(hm, o); + + let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); + hm.insert(Cow::from("0"), Cow::from("a")); + hm.insert(Cow::from("1"), Cow::from("b")); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from_iter(hm.iter()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); + assert_eq!(hm, o); } } From 19be2468d15a5cca59d1b88512e00c0753beea05 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 25 Apr 2024 12:42:42 +0200 Subject: [PATCH 288/598] shm examples fixed --- examples/examples/z_alloc_shm.rs | 10 +--------- examples/examples/z_ping_shm.rs | 10 ---------- examples/examples/z_pub_shm.rs | 1 - examples/examples/z_sub_shm.rs | 1 - zenoh/src/lib.rs | 3 +++ 5 files changed, 4 insertions(+), 21 deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index a6afb1190c..34e1c07058 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -12,14 +12,6 @@ // ZettaScale Zenoh Team, // use zenoh::prelude::r#async::*; -use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; -use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; -use zenoh::shm::provider::shared_memory_provider::{ - BlockOn, GarbageCollect, SharedMemoryProviderBuilder, -}; -use 
zenoh::shm::provider::shared_memory_provider::{Deallocate, Defragment}; -use zenoh::shm::provider::types::{AllocAlignment, MemoryLayout}; -use zenoh::Result; #[tokio::main] async fn main() { @@ -28,7 +20,7 @@ async fn main() { run().await.unwrap() } -async fn run() -> Result<()> { +async fn run() -> ZResult<()> { // Construct an SHM backend let backend = { // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 08c08276d4..98d9bae825 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -13,17 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::buffers::ZSlice; -use zenoh::config::Config; use zenoh::prelude::sync::*; -use zenoh::publication::CongestionControl; -use zenoh::shm::protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, -}; -use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; -use zenoh::shm::provider::types::AllocAlignment; -use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index cdabee5ff3..79527c3e5f 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index aa3967becd..282fd8c776 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -14,7 +14,6 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::shm::slice::zsliceshm::zsliceshm; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 
60dab218db..fce115cfb1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -365,6 +365,9 @@ pub mod internal { pub mod shm { pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; + pub use zenoh_shm::api::provider::shared_memory_provider::{Deallocate, Defragment}; + pub use zenoh_shm::api::provider::types::AllocAlignment; + pub use zenoh_shm::api::provider::types::MemoryLayout; pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ From afacf77d052e0a957ffaeb94422d2eb743a53e0f Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 25 Apr 2024 16:22:53 +0200 Subject: [PATCH 289/598] refactor: remove `zenoh::query::Mode` because unused (#977) It seems that `Mode` has been integrated into ConsolidationMode, replacing `Mode`. --- zenoh/src/query.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index db7071c278..16cd7fdec5 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -31,13 +31,6 @@ pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. pub type ConsolidationMode = zenoh_protocol::zenoh::query::Consolidation; -/// The operation: either manual or automatic. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum Mode { - Auto, - Manual(T), -} - /// The replies consolidation strategy to apply on replies to a [`get`](Session::get). 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct QueryConsolidation { From 193e2230680c7638718889094fddf4a9d6a8859a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 25 Apr 2024 18:09:26 +0200 Subject: [PATCH 290/598] Remove keyexpr with_parameters (#979) * Remove keyexpr with_parameters * Fix nextest.toml * Update plugins/zenoh-plugin-storage-manager/src/replica/storage.rs Co-authored-by: Joseph Perez --------- Co-authored-by: Joseph Perez --- .config/nextest.toml | 2 +- .../src/replica/aligner.rs | 8 ++++---- .../src/replica/storage.rs | 2 +- zenoh/src/key_expr.rs | 16 +--------------- zenoh/src/selector.rs | 7 +++++-- 5 files changed, 12 insertions(+), 23 deletions(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index b2ed4cde98..4999dce0d3 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -16,7 +16,7 @@ test(=three_node_combination) | test(=watchdog_alloc_concurrent) | test(=header_check_memory_concurrent) | test(=header_link_concurrent) | -test(=header_link_failure_concurrent) +test(=header_link_failure_concurrent) | test(=downsampling_by_keyexpr) """ threads-required = 'num-cpus' diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3392bf28e8..75368783b5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -316,10 +316,10 @@ impl Aligner { async fn perform_query(&self, from: &str, properties: String) -> (Vec, bool) { let mut no_err = true; - let selector = KeyExpr::from(&self.digest_key) - .join(&from) - .unwrap() - .with_parameters(&properties); + let selector = Selector::new( + KeyExpr::from(&self.digest_key).join(&from).unwrap(), + properties, + ); tracing::trace!("[ALIGNER] Sending Query '{}'...", selector); let mut return_val = Vec::new(); match self diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 14425f4c28..0dc8bcb79d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -638,7 +638,7 @@ impl StorageService { // with `_time=[..]` to get historical data (in case of time-series) let replies = match self .session - .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) + .get(Selector::new(&self.key_expr, "_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) .res() diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index c1c0504208..419918d547 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -57,7 +57,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, Undeclarable}; +use crate::{net::primitives::Primitives, Session, Undeclarable}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { @@ -301,20 +301,6 @@ impl<'a> KeyExpr<'a> { Ok(r.into()) } } - - pub fn with_parameters(self, selector: &'a str) -> Selector<'a> { - Selector { - key_expr: self, - parameters: selector.into(), - } - } - - pub fn with_owned_parameters(self, selector: String) -> Selector<'a> { - Selector { - key_expr: self, - parameters: selector.into(), - } - } } impl FromStr for KeyExpr<'static> { diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2c7fc2d782..343f5fda1d 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -239,7 +239,7 @@ impl TryFrom for Selector<'_> { Some(qmark_position) => { let parameters = s[qmark_position + 1..].to_owned(); s.truncate(qmark_position); - Ok(KeyExpr::try_from(s)?.with_owned_parameters(parameters)) + Ok(Selector::new(KeyExpr::try_from(s)?, parameters)) } None => Ok(KeyExpr::try_from(s)?.into()), } @@ -252,7 +252,10 @@ impl<'a> TryFrom<&'a str> for Selector<'a> { match s.find('?') { Some(qmark_position) => { let params = &s[qmark_position 
+ 1..]; - Ok(KeyExpr::try_from(&s[..qmark_position])?.with_parameters(params)) + Ok(Selector::new( + KeyExpr::try_from(&s[..qmark_position])?, + params, + )) } None => Ok(KeyExpr::try_from(s)?.into()), } From 88af83f596f19a3c947e06478ab0c7c6717dfa65 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 01:54:55 +0200 Subject: [PATCH 291/598] cargo fmt --- zenoh/src/api/key_expr.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 260399922f..774cf28790 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // -use super::{ - selector::Selector, - session::{Session, Undeclarable}, -}; +use super::session::{Session, Undeclarable}; use crate::net::primitives::Primitives; use std::{ convert::{TryFrom, TryInto}, From 8e636ed8cb6fcd9841d6078ebabecfa533ca6769 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 02:03:19 +0200 Subject: [PATCH 292/598] build fixes --- zenoh/src/api/builders/publication.rs | 2 -- zenoh/src/api/session.rs | 6 ++---- zenoh/src/lib.rs | 1 - 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index ef2224193f..711cb063f6 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -20,7 +20,6 @@ use crate::api::bytes::OptionZBytes; use crate::api::bytes::ZBytes; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; -#[cfg(feature = "unstable")] use crate::api::sample::Locality; use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] @@ -255,7 +254,6 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, - #[cfg(feature = "unstable")] pub(crate) destination: Locality, } diff --git a/zenoh/src/api/session.rs 
b/zenoh/src/api/session.rs index c481b01bdf..4fc0df5c1a 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -22,10 +22,7 @@ use super::{ info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, publication::Priority, - query::{ - ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply, - _REPLY_KEY_EXPR_ANY_SEL_PARAM, - }, + query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, @@ -87,6 +84,7 @@ use super::{ liveliness::{Liveliness, LivelinessTokenState}, publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, + query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index fce115cfb1..3c011e2439 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -271,7 +271,6 @@ pub mod publication { /// Query primitives pub mod query { - pub use crate::api::query::Mode; pub use crate::api::query::Reply; #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; From fc9e2d3cbbc2dab248cdf72f7f419a062e54f4f1 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Apr 2024 11:26:50 +0200 Subject: [PATCH 293/598] Fix clippy warnings --- zenoh/src/net/runtime/adminspace.rs | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 2ce736c1fa..c724ede9bf 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -714,15 +714,8 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - let payload = match ZBytes::try_from( - serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string()), - ) { - Ok(p) => p, - Err(e) => { - 
tracing::error!("Error serializing AdminSpace reply: {:?}", e); - return; - } - }; + let payload = + ZBytes::from(serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string())); if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) @@ -745,15 +738,8 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - let payload = match ZBytes::try_from( - serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string()), - ) { - Ok(p) => p, - Err(e) => { - tracing::error!("Error serializing AdminSpace reply: {:?}", e); - return; - } - }; + let payload = + ZBytes::from(serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string())); if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) From 42bd3e4e5305d5b9c35ad01a9e1b890b9c950183 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Apr 2024 11:56:18 +0200 Subject: [PATCH 294/598] Fix valgrind check --- ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index bc8716bb45..364617eb2a 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -20,15 +20,13 @@ use zenoh::prelude::r#async::*; async fn main() { zenoh_util::init_log_test(); - let _z = zenoh_runtime::ZRuntimePoolGuard; - let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); let _queryable = queryable_session - .declare_queryable(queryable_key_expr) + .declare_queryable(queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling 
query '{}'", query.selector()); let queryable_key_expr = queryable_key_expr.clone(); From 9284388c466b3ad5822e826bd612ccb847bd5eba Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 12:00:32 +0200 Subject: [PATCH 295/598] Add payload_mut to sample for zsliceshmmut deserialization --- examples/examples/z_sub_shm.rs | 20 ++++++++++++++++++++ zenoh/src/sample/mod.rs | 6 ++++++ 2 files changed, 26 insertions(+) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index aa3967becd..35fb80d833 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -49,6 +49,26 @@ async fn main() { } } } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. + // + // use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index b5dbd727ec..0c1180fb8f 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -308,6 +308,12 @@ impl Sample { &self.payload } + /// Gets the payload of this Sample. + #[inline] + pub fn payload_mut(&mut self) -> &mut ZBytes { + &mut self.payload + } + /// Gets the kind of this Sample. 
#[inline] pub fn kind(&self) -> SampleKind { From c4f7a49435c022bef1699af6a2d63dd7dd70b3c6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:15:00 +0200 Subject: [PATCH 296/598] clippy fix --- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 5f72192663..ba078c0012 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -34,6 +34,7 @@ use zenoh::key_expr::OwnedKeyExpr; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; +use zenoh::selector::Selector; use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; From 7f2e7a5a4339285822553d8074dcc1964dc28665 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:18:12 +0200 Subject: [PATCH 297/598] missing merged file added --- commons/zenoh-config/src/lib.rs | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 26f7cfefaa..1029446557 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, @@ -482,9 +482,6 @@ validated_struct::validator! 
{ /// To use it, you must enable zenoh's unstable feature flag. ///

AdminSpaceConf { - /// Enable the admin space - #[serde(default = "set_false")] - pub enabled: bool, /// Permissions on the admin space pub permissions: PermissionsConf { @@ -510,11 +507,7 @@ validated_struct::validator! { /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. - pub plugins_loading: #[derive(Default)] - PluginsLoading { - pub enabled: bool, - pub search_dirs: Option>, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) - }, + plugins_search_dirs: Vec, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) #[validated(recursive_accessors)] /// The configuration for plugins. /// @@ -728,13 +721,10 @@ impl Config { } pub fn libloader(&self) -> LibLoader { - if self.plugins_loading.enabled { - match self.plugins_loading.search_dirs() { - Some(dirs) => LibLoader::new(dirs, true), - None => LibLoader::default(), - } + if self.plugins_search_dirs.is_empty() { + LibLoader::default() } else { - LibLoader::empty() + LibLoader::new(&self.plugins_search_dirs, true) } } } From 509c7279ba6d9ea54150162225255884707e38a1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:23:24 +0200 Subject: [PATCH 298/598] priority removed --- commons/zenoh-config/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 1029446557..e7d73248db 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, 
WhatAmIMatcherVisitor, ZenohId, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, From 983bd89191302ba31f6129c127d59b1733a05859 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:28:25 +0200 Subject: [PATCH 299/598] changes in config restored --- commons/zenoh-config/src/lib.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index e7d73248db..26f7cfefaa 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -482,6 +482,9 @@ validated_struct::validator! { /// To use it, you must enable zenoh's unstable feature flag. /// AdminSpaceConf { + /// Enable the admin space + #[serde(default = "set_false")] + pub enabled: bool, /// Permissions on the admin space pub permissions: PermissionsConf { @@ -507,7 +510,11 @@ validated_struct::validator! { /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. - plugins_search_dirs: Vec, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) + pub plugins_loading: #[derive(Default)] + PluginsLoading { + pub enabled: bool, + pub search_dirs: Option>, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) + }, #[validated(recursive_accessors)] /// The configuration for plugins. 
/// @@ -721,10 +728,13 @@ impl Config { } pub fn libloader(&self) -> LibLoader { - if self.plugins_search_dirs.is_empty() { - LibLoader::default() + if self.plugins_loading.enabled { + match self.plugins_loading.search_dirs() { + Some(dirs) => LibLoader::new(dirs, true), + None => LibLoader::default(), + } } else { - LibLoader::new(&self.plugins_search_dirs, true) + LibLoader::empty() } } } From a0c78df48b90917be063c4e2e56a1cd16499395d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 14:55:51 +0200 Subject: [PATCH 300/598] removed old bytes.rs --- zenoh/src/bytes.rs | 2082 -------------------------------------------- 1 file changed, 2082 deletions(-) delete mode 100644 zenoh/src/bytes.rs diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs deleted file mode 100644 index c36136ef81..0000000000 --- a/zenoh/src/bytes.rs +++ /dev/null @@ -1,2082 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! ZBytes primitives. 
-use crate::buffers::ZBuf; -use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, - string::FromUtf8Error, sync::Arc, -}; -use unwrap_infallible::UnwrapInfallible; -use zenoh_buffers::{ - buffer::{Buffer, SplitBuffer}, - reader::HasReader, - writer::HasWriter, - ZBufReader, ZBufWriter, ZSlice, -}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; -use zenoh_result::{ZError, ZResult}; -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -use zenoh_shm::{ - api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, - }, - SharedMemoryBuf, -}; - -/// Trait to encode a type `T` into a [`Value`]. -pub trait Serialize { - type Output; - - /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. - fn serialize(self, t: T) -> Self::Output; -} - -pub trait Deserialize<'a, T> { - type Input: 'a; - type Error; - - /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: Self::Input) -> Result; -} - -/// ZBytes contains the serialized bytes of user data. -#[repr(transparent)] -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ZBytes(ZBuf); - -impl ZBytes { - /// Create an empty ZBytes. - pub const fn empty() -> Self { - Self(ZBuf::empty()) - } - - /// Create a [`ZBytes`] from any type `T` that implements [`Into`]. - pub fn new(t: T) -> Self - where - T: Into, - { - Self(t.into()) - } - - /// Returns wether the ZBytes is empty or not. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns the length of the ZBytes. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. 
- pub fn reader(&self) -> ZBytesReader<'_> { - ZBytesReader(self.0.reader()) - } - - /// Build a [`ZBytes`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. - pub fn from_reader(mut reader: R) -> Result - where - R: std::io::Read, - { - let mut buf: Vec = vec![]; - reader.read_to_end(&mut buf)?; - Ok(ZBytes::new(buf)) - } - - /// Get a [`ZBytesWriter`] implementing [`std::io::Write`] trait. - pub fn writer(&mut self) -> ZBytesWriter<'_> { - ZBytesWriter(self.0.writer()) - } - - /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> ZBytesIterator<'_, T> - where - T: for<'b> TryFrom<&'b ZBytes>, - for<'b> ZSerde: Deserialize<'b, T>, - for<'b> >::Error: Debug, - { - ZBytesIterator { - reader: self.0.reader(), - _t: PhantomData::, - } - } - - /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. - /// - /// ```rust - /// use zenoh::bytes::ZBytes; - /// - /// let start = String::from("abc"); - /// let bytes = ZBytes::serialize(start.clone()); - /// let end: String = bytes.deserialize().unwrap(); - /// assert_eq!(start, end); - /// ``` - pub fn serialize(t: T) -> Self - where - ZSerde: Serialize, - { - ZSerde.serialize(t) - } - - /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn deserialize<'a, T>(&'a self) -> ZResult - where - ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, - >::Error: Debug, - { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) - } - - /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult - where - ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, - >::Error: Debug, - { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) - } - - /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
- pub fn into<'a, T>(&'a self) -> T - where - ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, - >::Error: Debug, - { - ZSerde.deserialize(self).unwrap_infallible() - } - - /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn into_mut<'a, T>(&'a mut self) -> T - where - ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, - >::Error: Debug, - { - ZSerde.deserialize(self).unwrap_infallible() - } -} - -/// A reader that implements [`std::io::Read`] trait to read from a [`ZBytes`]. -#[repr(transparent)] -#[derive(Debug)] -pub struct ZBytesReader<'a>(ZBufReader<'a>); - -impl std::io::Read for ZBytesReader<'_> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - std::io::Read::read(&mut self.0, buf) - } -} - -impl std::io::Seek for ZBytesReader<'_> { - fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { - std::io::Seek::seek(&mut self.0, pos) - } -} - -/// A writer that implements [`std::io::Write`] trait to write into a [`ZBytes`]. -#[repr(transparent)] -#[derive(Debug)] -pub struct ZBytesWriter<'a>(ZBufWriter<'a>); - -impl std::io::Write for ZBytesWriter<'_> { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - std::io::Write::write(&mut self.0, buf) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } -} - -/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`ZBytes`]. -/// Note that [`ZBytes`] contains a serialized version of `T` and iterating over a [`ZBytes`] performs lazy deserialization. 
-#[repr(transparent)] -#[derive(Debug)] -pub struct ZBytesIterator<'a, T> -where - ZSerde: Deserialize<'a, T>, -{ - reader: ZBufReader<'a>, - _t: PhantomData, -} - -impl Iterator for ZBytesIterator<'_, T> -where - for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, - for<'a> >::Error: Debug, -{ - type Item = T; - - fn next(&mut self) -> Option { - let codec = Zenoh080::new(); - - let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; - let kpld = ZBytes::new(kbuf); - - let t = ZSerde.deserialize(&kpld).ok()?; - Some(t) - } -} - -impl FromIterator for ZBytes -where - ZSerde: Serialize, -{ - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut writer = buffer.writer(); - for t in iter { - let tpld = ZSerde.serialize(t); - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &tpld.0).unwrap_unchecked(); - } - } - - ZBytes::new(buffer) - } -} - -/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. -#[repr(transparent)] -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct OptionZBytes(Option); - -impl From for OptionZBytes -where - T: Into, -{ - fn from(value: T) -> Self { - Self(Some(value.into())) - } -} - -impl From> for OptionZBytes -where - T: Into, -{ - fn from(mut value: Option) -> Self { - match value.take() { - Some(v) => Self(Some(v.into())), - None => Self(None), - } - } -} - -impl From<&Option> for OptionZBytes -where - for<'a> &'a T: Into, -{ - fn from(value: &Option) -> Self { - match value.as_ref() { - Some(v) => Self(Some(v.into())), - None => Self(None), - } - } -} - -impl From for Option { - fn from(value: OptionZBytes) -> Self { - value.0 - } -} - -/// The default serializer for ZBytes. 
It supports primitives types, such as: Vec, int, uint, float, string, bool. -/// It also supports common Rust serde values. -#[derive(Clone, Copy, Debug)] -pub struct ZSerde; - -#[derive(Debug, Clone, Copy)] -pub struct ZDeserializeError; - -// ZBuf -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZBuf) -> Self::Output { - ZBytes::new(t) - } -} - -impl From for ZBytes { - fn from(t: ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&ZBuf> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &ZBuf) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&ZBuf> for ZBytes { - fn from(t: &ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut ZBuf> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut ZBuf) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&mut ZBuf> for ZBytes { - fn from(t: &mut ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, ZBuf> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result { - Ok(v.0.clone()) - } -} - -impl From for ZBuf { - fn from(value: ZBytes) -> Self { - value.0 - } -} - -impl From<&ZBytes> for ZBuf { - fn from(value: &ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl From<&mut ZBytes> for ZBuf { - fn from(value: &mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// ZSlice -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZSlice) -> Self::Output { - ZBytes::new(t) - } -} - -impl From for ZBytes { - fn from(t: ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&ZSlice> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &ZSlice) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&ZSlice> for ZBytes { - fn from(t: &ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut ZSlice> for ZSerde { - type Output = 
ZBytes; - - fn serialize(self, t: &mut ZSlice) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&mut ZSlice> for ZBytes { - fn from(t: &mut ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, ZSlice> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result { - Ok(v.0.to_zslice()) - } -} - -impl From for ZSlice { - fn from(value: ZBytes) -> Self { - ZBuf::from(value).to_zslice() - } -} - -impl From<&ZBytes> for ZSlice { - fn from(value: &ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl From<&mut ZBytes> for ZSlice { - fn from(value: &mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// [u8; N] -impl Serialize<[u8; N]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: [u8; N]) -> Self::Output { - ZBytes::new(t) - } -} - -impl From<[u8; N]> for ZBytes { - fn from(t: [u8; N]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&[u8; N]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &[u8; N]) -> Self::Output { - ZBytes::new(*t) - } -} - -impl From<&[u8; N]> for ZBytes { - fn from(t: &[u8; N]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut [u8; N]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut [u8; N]) -> Self::Output { - ZBytes::new(*t) - } -} - -impl From<&mut [u8; N]> for ZBytes { - fn from(t: &mut [u8; N]) -> Self { - ZSerde.serialize(*t) - } -} - -impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<[u8; N], Self::Error> { - use std::io::Read; - - if v.0.len() != N { - return Err(ZDeserializeError); - } - let mut dst = [0u8; N]; - let mut reader = v.reader(); - reader.read_exact(&mut dst).map_err(|_| ZDeserializeError)?; - Ok(dst) - } -} - -impl TryFrom for [u8; N] { - type Error = ZDeserializeError; - - fn 
try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Vec -impl Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: Vec) -> Self::Output { - ZBytes::new(t) - } -} - -impl From> for ZBytes { - fn from(t: Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&Vec> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &Vec) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&Vec> for ZBytes { - fn from(t: &Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut Vec> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut Vec) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&mut Vec> for ZBytes { - fn from(t: &mut Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Vec> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - Ok(v.0.contiguous().to_vec()) - } -} - -impl From for Vec { - fn from(value: ZBytes) -> Self { - ZSerde.deserialize(&value).unwrap_infallible() - } -} - -impl From<&ZBytes> for Vec { - fn from(value: &ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl From<&mut ZBytes> for Vec { - fn from(value: &mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// &[u8] -impl Serialize<&[u8]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &[u8]) -> Self::Output { - ZBytes::new(t.to_vec()) - } -} - -impl From<&[u8]> for ZBytes { - fn from(t: &[u8]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut [u8]> for ZSerde { - type Output = ZBytes; - - fn 
serialize(self, t: &mut [u8]) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl From<&mut [u8]> for ZBytes { - fn from(t: &mut [u8]) -> Self { - ZSerde.serialize(t) - } -} - -// Cow<[u8]> -impl<'a> Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { - ZBytes::new(t.to_vec()) - } -} - -impl From> for ZBytes { - fn from(t: Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { - ZBytes::new(t.to_vec()) - } -} - -impl From<&Cow<'_, [u8]>> for ZBytes { - fn from(t: &Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl From<&mut Cow<'_, [u8]>> for ZBytes { - fn from(t: &mut Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - Ok(v.0.contiguous()) - } -} - -impl From for Cow<'static, [u8]> { - fn from(v: ZBytes) -> Self { - match v.0.contiguous() { - Cow::Borrowed(s) => Cow::Owned(s.to_vec()), - Cow::Owned(s) => Cow::Owned(s), - } - } -} - -impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { - fn from(value: &'a ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { - fn from(value: &'a mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// String -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: String) -> Self::Output { - ZBytes::new(s.into_bytes()) - } -} - -impl From for ZBytes { - fn from(t: String) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&String> for ZSerde { - type Output = 
ZBytes; - - fn serialize(self, s: &String) -> Self::Output { - ZBytes::new(s.clone().into_bytes()) - } -} - -impl From<&String> for ZBytes { - fn from(t: &String) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut String> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &mut String) -> Self::Output { - ZSerde.serialize(&*s) - } -} - -impl From<&mut String> for ZBytes { - fn from(t: &mut String) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, String> for ZSerde { - type Input = &'a ZBytes; - type Error = FromUtf8Error; - - fn deserialize(self, v: Self::Input) -> Result { - let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); - String::from_utf8(v) - } -} - -impl TryFrom for String { - type Error = FromUtf8Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for String { - type Error = FromUtf8Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for String { - type Error = FromUtf8Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// &str -impl Serialize<&str> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &str) -> Self::Output { - ZSerde.serialize(s.to_string()) - } -} - -impl From<&str> for ZBytes { - fn from(t: &str) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut str> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &mut str) -> Self::Output { - ZSerde.serialize(&*s) - } -} - -impl From<&mut str> for ZBytes { - fn from(t: &mut str) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) - } -} - -impl From> for ZBytes { - fn from(t: Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&Cow<'a, str>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: 
&Cow<'a, str>) -> Self::Output { - ZSerde.serialize(s.to_string()) - } -} - -impl From<&Cow<'_, str>> for ZBytes { - fn from(t: &Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { - ZSerde.serialize(&*s) - } -} - -impl From<&mut Cow<'_, str>> for ZBytes { - fn from(t: &mut Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { - type Input = &'a ZBytes; - type Error = Utf8Error; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - Cow::try_from(v) - } -} - -impl TryFrom for Cow<'static, str> { - type Error = Utf8Error; - - fn try_from(v: ZBytes) -> Result { - let v: Cow<'static, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { - type Error = Utf8Error; - - fn try_from(v: &'a ZBytes) -> Result { - let v: Cow<'a, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { - type Error = Utf8Error; - - fn try_from(v: &'a mut ZBytes) -> Result { - let v: Cow<'a, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -// - Integers impl -macro_rules! 
impl_int { - ($t:ty) => { - impl Serialize<$t> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: $t) -> Self::Output { - let bs = t.to_le_bytes(); - let mut end = 1; - if t != 0 as $t { - end += bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); - }; - // SAFETY: - // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 - // - end is a valid end index because is bounded between 0 and bs.len() - ZBytes::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) - } - } - - impl From<$t> for ZBytes { - fn from(t: $t) -> Self { - ZSerde.serialize(t) - } - } - - impl Serialize<&$t> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &$t) -> Self::Output { - Self.serialize(*t) - } - } - - impl From<&$t> for ZBytes { - fn from(t: &$t) -> Self { - ZSerde.serialize(t) - } - } - - impl Serialize<&mut $t> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut $t) -> Self::Output { - Self.serialize(*t) - } - } - - impl From<&mut $t> for ZBytes { - fn from(t: &mut $t) -> Self { - ZSerde.serialize(t) - } - } - - impl<'a> Deserialize<'a, $t> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { - use std::io::Read; - - let mut r = v.reader(); - let mut bs = (0 as $t).to_le_bytes(); - if v.len() > bs.len() { - return Err(ZDeserializeError); - } - r.read_exact(&mut bs[..v.len()]) - .map_err(|_| ZDeserializeError)?; - let t = <$t>::from_le_bytes(bs); - Ok(t) - } - } - - impl TryFrom for $t { - type Error = ZDeserializeError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } - } - - impl TryFrom<&ZBytes> for $t { - type Error = ZDeserializeError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } - } - - impl TryFrom<&mut ZBytes> for $t { - type Error = ZDeserializeError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } - 
} - }; -} - -// Zenoh unsigned integers -impl_int!(u8); -impl_int!(u16); -impl_int!(u32); -impl_int!(u64); -impl_int!(usize); - -// Zenoh signed integers -impl_int!(i8); -impl_int!(i16); -impl_int!(i32); -impl_int!(i64); -impl_int!(isize); - -// Zenoh floats -impl_int!(f32); -impl_int!(f64); - -// Zenoh bool -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: bool) -> Self::Output { - // SAFETY: casting a bool into an integer is well-defined behaviour. - // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - ZBytes::new(ZBuf::from((t as u8).to_le_bytes())) - } -} - -impl From for ZBytes { - fn from(t: bool) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&bool> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &bool) -> Self::Output { - ZSerde.serialize(*t) - } -} - -impl From<&bool> for ZBytes { - fn from(t: &bool) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut bool> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut bool) -> Self::Output { - ZSerde.serialize(*t) - } -} - -impl From<&mut bool> for ZBytes { - fn from(t: &mut bool) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, bool> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result { - let p = v.deserialize::().map_err(|_| ZDeserializeError)?; - match p { - 0 => Ok(false), - 1 => Ok(true), - _ => Err(ZDeserializeError), - } - } -} - -impl TryFrom for bool { - type Error = ZDeserializeError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for bool { - type Error = ZDeserializeError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for bool { - type Error = ZDeserializeError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// - Zenoh advanced types encoders/decoders -// 
Properties -impl Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: Properties<'_>) -> Self::Output { - Self.serialize(t.as_str()) - } -} - -impl From> for ZBytes { - fn from(t: Properties<'_>) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&Properties<'_>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &Properties<'_>) -> Self::Output { - Self.serialize(t.as_str()) - } -} - -impl<'s> From<&'s Properties<'s>> for ZBytes { - fn from(t: &'s Properties<'s>) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut Properties<'_>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut Properties<'_>) -> Self::Output { - Self.serialize(t.as_str()) - } -} - -impl<'s> From<&'s mut Properties<'s>> for ZBytes { - fn from(t: &'s mut Properties<'s>) -> Self { - ZSerde.serialize(&*t) - } -} - -impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { - type Input = &'s ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - let s = v - .deserialize::>() - .map_err(|_| ZDeserializeError)?; - Ok(Properties::from(s)) - } -} - -impl TryFrom for Properties<'static> { - type Error = ZDeserializeError; - - fn try_from(v: ZBytes) -> Result { - let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; - Ok(Properties::from(s.into_owned())) - } -} - -impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { - type Error = ZDeserializeError; - - fn try_from(value: &'s ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { - type Error = ZDeserializeError; - - fn try_from(value: &'s mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// JSON -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_json::Value) -> Self::Output { - ZSerde.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_json::Error; - - fn try_from(value: serde_json::Value) -> Result { - 
ZSerde.serialize(&value) - } -} - -impl Serialize<&serde_json::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_json::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&serde_json::Value> for ZBytes { - type Error = serde_json::Error; - - fn try_from(value: &serde_json::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_json::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_json::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_json::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&mut serde_json::Value> for ZBytes { - type Error = serde_json::Error; - - fn try_from(value: &mut serde_json::Value) -> Result { - ZSerde.serialize(&*value) - } -} - -impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_json::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_json::from_reader(v.reader()) - } -} - -impl TryFrom for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Yaml -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_yaml::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_yaml::Error; - - fn try_from(value: serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_yaml::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_yaml::Value) 
-> Self::Output { - let mut bytes = ZBytes::empty(); - serde_yaml::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&serde_yaml::Value> for ZBytes { - type Error = serde_yaml::Error; - - fn try_from(value: &serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_yaml::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_yaml::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&mut serde_yaml::Value> for ZBytes { - type Error = serde_yaml::Error; - - fn try_from(value: &mut serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_yaml::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_yaml::from_reader(v.reader()) - } -} - -impl TryFrom for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// CBOR -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_cbor::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_cbor::Error; - - fn try_from(value: serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_cbor::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_cbor::to_writer(bytes.0.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&serde_cbor::Value> 
for ZBytes { - type Error = serde_cbor::Error; - - fn try_from(value: &serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_cbor::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl TryFrom<&mut serde_cbor::Value> for ZBytes { - type Error = serde_cbor::Error; - - fn try_from(value: &mut serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_cbor::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_cbor::from_reader(v.reader()) - } -} - -impl TryFrom for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Pickle -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_pickle::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_pickle::Error; - - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_pickle::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_pickle::value_to_writer( - &mut bytes.0.writer(), - t, - serde_pickle::SerOptions::default(), - )?; - Ok(bytes) - } -} - -impl TryFrom<&serde_pickle::Value> for ZBytes { - type Error = serde_pickle::Error; - - fn try_from(value: &serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - 
-impl Serialize<&mut serde_pickle::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl TryFrom<&mut serde_pickle::Value> for ZBytes { - type Error = serde_pickle::Error; - - fn try_from(value: &mut serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_pickle::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) - } -} - -impl TryFrom for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Shared memory conversion -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZSliceShm) -> Self::Output { - let slice: ZSlice = t.into(); - ZBytes::new(slice) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShm) -> Self { - ZSerde.serialize(t) - } -} - -// Shared memory conversion -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZSliceShmMut) -> Self::Output { - let slice: ZSlice = t.into(); - ZBytes::new(slice) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShmMut) -> Self { - ZSerde.serialize(t) - } -} - -#[cfg(all(feature = 
"shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { - // A ZSliceShm is expected to have only one slice - let mut zslices = v.0.zslices(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.into()); - } - } - Err(ZDeserializeError) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { - type Error = ZDeserializeError; - - fn try_from(value: &'a ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { - type Error = ZDeserializeError; - - fn try_from(value: &'a mut ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { - type Input = &'a mut ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { - // A ZSliceShmBorrowMut is expected to have only one slice - let mut zslices = v.0.zslices_mut(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { - return Ok(shmb.into()); - } - } - Err(ZDeserializeError) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { - type Input = &'a mut ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { - // A ZSliceShmBorrowMut is expected to have only one slice - let mut zslices = v.0.zslices_mut(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { - return shmb.try_into().map_err(|_| ZDeserializeError); - } - } - 
Err(ZDeserializeError) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { - type Error = ZDeserializeError; - - fn try_from(value: &'a mut ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -// Tuple -macro_rules! impl_tuple { - ($t:expr) => {{ - let (a, b) = $t; - - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut writer = buffer.writer(); - let apld: ZBytes = a.into(); - let bpld: ZBytes = b.into(); - - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &apld.0).unwrap_unchecked(); - codec.write(&mut writer, &bpld.0).unwrap_unchecked(); - } - - ZBytes::new(buffer) - }}; -} -impl Serialize<(A, B)> for ZSerde -where - A: Into, - B: Into, -{ - type Output = ZBytes; - - fn serialize(self, t: (A, B)) -> Self::Output { - impl_tuple!(t) - } -} - -impl Serialize<&(A, B)> for ZSerde -where - for<'a> &'a A: Into, - for<'b> &'b B: Into, -{ - type Output = ZBytes; - - fn serialize(self, t: &(A, B)) -> Self::Output { - impl_tuple!(t) - } -} - -impl From<(A, B)> for ZBytes -where - A: Into, - B: Into, -{ - fn from(value: (A, B)) -> Self { - ZSerde.serialize(value) - } -} - -impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Input = &'s ZBytes; - type Error = ZError; - - fn deserialize(self, bytes: Self::Input) -> Result<(A, B), Self::Error> { - let codec = Zenoh080::new(); - let mut reader = bytes.0.reader(); - - let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let apld = ZBytes::new(abuf); - - let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let bpld = ZBytes::new(bbuf); - - let a = 
A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; - Ok((a, b)) - } -} - -impl TryFrom for (A, B) -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Error = ZError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for (A, B) -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Error = ZError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for (A, B) -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Error = ZError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// For convenience to always convert a Value in the examples -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum StringOrBase64 { - String(String), - Base64(String), -} - -impl StringOrBase64 { - pub fn into_string(self) -> String { - match self { - StringOrBase64::String(s) | StringOrBase64::Base64(s) => s, - } - } -} - -impl Deref for StringOrBase64 { - type Target = String; - - fn deref(&self) -> &Self::Target { - match self { - Self::String(s) | Self::Base64(s) => s, - } - } -} - -impl std::fmt::Display for StringOrBase64 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(self) - } -} - -impl From<&ZBytes> for StringOrBase64 { - fn from(v: &ZBytes) -> Self { - use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::() { - Ok(s) => StringOrBase64::String(s), - Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.into::>())), - } - } -} - -impl From<&mut ZBytes> for StringOrBase64 { - fn from(v: &mut ZBytes) -> Self { - StringOrBase64::from(&*v) - } -} - -// Protocol attachment extension -impl From for 
AttachmentType { - fn from(this: ZBytes) -> Self { - AttachmentType { - buffer: this.into(), - } - } -} - -impl From> for ZBytes { - fn from(this: AttachmentType) -> Self { - this.buffer.into() - } -} - -mod tests { - #[test] - fn serializer() { - use super::ZBytes; - use rand::Rng; - use std::borrow::Cow; - use zenoh_buffers::{ZBuf, ZSlice}; - use zenoh_protocol::core::Properties; - - #[cfg(all(feature = "shared-memory", feature = "unstable"))] - use zenoh_shm::api::{ - protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zsliceshm::{zsliceshm, ZSliceShm}, - }; - - const NUM: usize = 1_000; - - macro_rules! serialize_deserialize { - ($t:ty, $in:expr) => { - let i = $in; - let t = i.clone(); - println!("Serialize:\t{:?}", t); - let v = ZBytes::serialize(t); - println!("Deserialize:\t{:?}", v); - let o: $t = v.deserialize().unwrap(); - assert_eq!(i, o); - println!(""); - }; - } - - // WARN: test function body produces stack overflow, so I split it into subroutines - #[inline(never)] - fn numeric() { - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } - - // signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, 
i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } - - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); - - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); - - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); - } - } - numeric(); - - // WARN: test function body produces stack overflow, so I split it into subroutines - #[inline(never)] - fn basic() { - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); - } - basic(); - - // SHM - #[cfg(all(feature = "shared-memory", feature = "unstable"))] - { - // create an SHM backend... 
- let backend = PosixSharedMemoryProviderBackend::builder() - .with_size(4096) - .unwrap() - .res() - .unwrap(); - // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() - .protocol_id::() - .backend(backend) - .res(); - - // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); - - // allocate an SHM buffer - let mutable_shm_buf = layout.alloc().res().unwrap(); - - // convert to immutable SHM buffer - let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); - - serialize_deserialize!(&zsliceshm, immutable_shm_buf); - } - - // Properties - serialize_deserialize!(Properties, Properties::from("")); - serialize_deserialize!(Properties, Properties::from("a=1;b=2;c3")); - - // Tuple - serialize_deserialize!((usize, usize), (0, 1)); - serialize_deserialize!((usize, String), (0, String::from("a"))); - serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); - serialize_deserialize!( - (Cow<'static, [u8]>, Cow<'static, [u8]>), - (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) - ); - serialize_deserialize!( - (Cow<'static, str>, Cow<'static, str>), - (Cow::from("a"), Cow::from("b")) - ); - - // Iterator - let v: [usize; 5] = [0, 1, 2, 3, 4]; - println!("Serialize:\t{:?}", v); - let p = ZBytes::from_iter(v.iter()); - println!("Deserialize:\t{:?}\n", p); - for (i, t) in p.iter::().enumerate() { - assert_eq!(i, t); - } - - let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; - println!("Serialize:\t{:?}", v); - let p = ZBytes::from_iter(v.drain(..)); - println!("Deserialize:\t{:?}\n", p); - let mut iter = p.iter::<[u8; 4]>(); - assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); - assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); - assert_eq!(iter.next().unwrap(), [8, 9, 10, 11]); - assert_eq!(iter.next().unwrap(), [12, 13, 14, 15]); - assert!(iter.next().is_none()); - - use std::collections::HashMap; - let mut hm: HashMap = HashMap::new(); - hm.insert(0, 
0); - hm.insert(1, 1); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, usize)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZSlice::from(vec![0u8; 8])); - hm.insert(1, ZSlice::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZBuf::from(vec![0u8; 8])); - hm.insert(1, ZBuf::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(String::from("0"), String::from("a")); - hm.insert(String::from("1"), String::from("b")); - 
println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.iter()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(String, String)>()); - assert_eq!(hm, o); - - let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); - hm.insert(Cow::from("0"), Cow::from("a")); - hm.insert(Cow::from("1"), Cow::from("b")); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.iter()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); - assert_eq!(hm, o); - } -} From a724fa8500e4e3aa7d690d40f01cb6a8ab416c1a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 15:13:08 +0200 Subject: [PATCH 301/598] restored missed typedef usage --- zenoh/src/api/session.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 4fc0df5c1a..01fc345c3b 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -14,7 +14,8 @@ use super::{ admin, builders::publication::{ - PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, + SessionPutBuilder, }, bytes::ZBytes, encoding::Encoding, @@ -712,13 +713,13 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoZBytes, - ) -> PublicationBuilder, PublicationBuilderPut> + ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, IntoZBytes: Into, { - PublicationBuilder { + SessionPutBuilder { publisher: self.declare_publisher(key_expr), kind: PublicationBuilderPut { payload: payload.into(), @@ -752,12 +753,12 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> PublicationBuilder, PublicationBuilderDelete> + ) -> SessionDeleteBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - PublicationBuilder { + 
SessionDeleteBuilder { publisher: self.declare_publisher(key_expr), kind: PublicationBuilderDelete, timestamp: None, From 0983d58ef38128566f9b18b26046b09c73d1ab3c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 16:26:11 +0200 Subject: [PATCH 302/598] Improve SHM examples --- examples/examples/z_pub_shm.rs | 5 ++-- examples/examples/z_sub_shm.rs | 50 +++++++++++++++++----------------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 8287509f1b..0dce88b8e7 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -25,7 +25,6 @@ use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; const N: usize = 10; -const K: u32 = 3; #[tokio::main] async fn main() -> Result<(), zenoh::Error> { @@ -81,7 +80,9 @@ async fn main() -> Result<(), zenoh::Error> { .unwrap(); println!("Press CTRL-C to quit..."); - for idx in 0..(K * N as u32) { + for idx in 0..u32::MAX { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let mut sbuf = layout .alloc() .with_policy::>() diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 35fb80d833..319d8ecf90 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -36,39 +36,39 @@ async fn main() { let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); - while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::<&zsliceshm>() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } - } - } - - // // Try to get a mutable reference to the SHM buffer. 
If this subscriber is the only subscriber - // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. - // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. - // - // use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; - - // while let Ok(mut sample) = subscriber.recv_async().await { - // let kind = sample.kind(); - // let key_expr = sample.key_expr().to_string(); - // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // while let Ok(sample) = subscriber.recv_async().await { + // match sample.payload().deserialize::<&zsliceshm>() { // Ok(payload) => println!( // ">> [Subscriber] Received {} ('{}': '{:02x?}')", - // kind, key_expr, payload + // sample.kind(), + // sample.key_expr().as_str(), + // payload // ), // Err(e) => { // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); // } // } // } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. 
+ // + use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; + + while let Ok(mut sample) = subscriber.recv_async().await { + let kind = sample.kind(); + let key_expr = sample.key_expr().to_string(); + match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + Ok(payload) => println!( + ">> [Subscriber] Received {} ('{}': '{:02x?}')", + kind, key_expr, payload + ), + Err(e) => { + println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + } + } + } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] From f08ea12c2760d42c122a228f565c9463b7df0ccb Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 16:36:32 +0200 Subject: [PATCH 303/598] Fix merge --- examples/examples/z_sub_shm.rs | 51 +++++++++++++++++----------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index fdb3204ac9..45180f598b 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -14,6 +14,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::shm::zsliceshm; use zenoh_examples::CommonArgs; #[tokio::main] @@ -35,39 +36,39 @@ async fn main() { let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); - // while let Ok(sample) = subscriber.recv_async().await { - // match sample.payload().deserialize::<&zsliceshm>() { - // Ok(payload) => println!( - // ">> [Subscriber] Received {} ('{}': '{:02x?}')", - // sample.kind(), - // sample.key_expr().as_str(), - // payload - // ), - // Err(e) => { - // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - // } - // } - // } - - // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber - // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. 
- // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. - // - use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; - - while let Ok(mut sample) = subscriber.recv_async().await { - let kind = sample.kind(); - let key_expr = sample.key_expr().to_string(); - match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + while let Ok(sample) = subscriber.recv_async().await { + match sample.payload().deserialize::<&zsliceshm>() { Ok(payload) => println!( ">> [Subscriber] Received {} ('{}': '{:02x?}')", - kind, key_expr, payload + sample.kind(), + sample.key_expr().as_str(), + payload ), Err(e) => { println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); } } } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. 
+ // + // use zenoh::shm::zsliceshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] From 12376c0d968b60bc49da5643ca3ac6c470edb1ad Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 18:03:09 +0200 Subject: [PATCH 304/598] Query/Reply shared memory examples --- examples/Cargo.toml | 10 ++ examples/examples/z_get.rs | 2 +- examples/examples/z_get_shm.rs | 158 +++++++++++++++++++++++++++ examples/examples/z_queryable.rs | 7 +- examples/examples/z_queryable_shm.rs | 134 +++++++++++++++++++++++ examples/examples/z_sub_shm.rs | 17 ++- zenoh/src/api/query.rs | 5 + zenoh/src/api/queryable.rs | 37 +++++-- zenoh/src/api/session.rs | 12 +- zenoh/src/net/runtime/adminspace.rs | 6 +- 10 files changed, 357 insertions(+), 31 deletions(-) create mode 100644 examples/examples/z_get_shm.rs create mode 100644 examples/examples/z_queryable_shm.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e117507ae9..ce268572a6 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,6 +100,11 @@ path = "examples/z_pull.rs" name = "z_queryable" path = "examples/z_queryable.rs" +[[example]] +name = "z_queryable_shm" +path = "examples/z_queryable_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_storage" path = "examples/z_storage.rs" @@ -108,6 +113,11 @@ path = "examples/z_storage.rs" name = "z_get" path = "examples/z_get.rs" +[[example]] +name = "z_get_shm" +path = "examples/z_get_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_forward" 
path = "examples/z_forward.rs" diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 56693d9fa1..76add34286 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -32,7 +32,7 @@ async fn main() { // // By default get receives replies from a FIFO. // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. - .with(zenoh::handlers::RingChannel::default()) + // .with(zenoh::handlers::RingChannel::default()) .value(value) .target(target) .timeout(timeout) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs new file mode 100644 index 0000000000..c5f766f0f2 --- /dev/null +++ b/examples/examples/z_get_shm.rs @@ -0,0 +1,158 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use std::time::Duration; +use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, selector, mut value, target, timeout) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).res().await.unwrap(); + + println!("Creating POSIX SHM backend..."); + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. This code is executed at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + println!("Creating SHM Provider with POSIX backend..."); + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + println!("Allocating Shared Memory Buffer..."); + let layout = shared_memory_provider + .alloc_layout() + .size(1024) + .res() + .unwrap(); + + let mut sbuf = layout + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + let content = value + .take() + .unwrap_or_else(|| "Get from SharedMemory Rust!".to_string()); + sbuf[0..content.len()].copy_from_slice(content.as_bytes()); + + println!("Sending Query '{selector}'..."); + let replies = session + .get(&selector) + .value(sbuf) + .target(target) + .timeout(timeout) + .res() + .await + .unwrap(); + + while let Ok(reply) = replies.recv_async().await { + match reply.result() { + Ok(sample) => { + print!(">> Received ('{}': ",
sample.key_expr().as_str()); + match sample.payload().deserialize::<&zsliceshm>() { + Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), + Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), + } + } + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } + } + } +} + +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[value(rename_all = "SCREAMING_SNAKE_CASE")] +enum Qt { + BestMatching, + All, + AllComplete, +} + +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The selection of resources to query + selector: Selector<'static>, + /// The value to publish. + value: Option, + #[arg(short, long, default_value = "BEST_MATCHING")] + /// The target queryables of the query. + target: Qt, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. + timeout: u64, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> ( + Config, + Selector<'static>, + Option, + QueryTarget, + Duration, +) { + let args = Args::parse(); + ( + args.common.into(), + args.selector, + args.value, + match args.target { + Qt::BestMatching => QueryTarget::BestMatching, + Qt::All => QueryTarget::All, + Qt::AllComplete => QueryTarget::AllComplete, + }, + Duration::from_millis(args.timeout), + ) +} diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 47f70c30c3..8407f9f66f 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -20,7 +20,12 @@ async fn main() { // initiate logging zenoh_util::try_init_log_from_env(); - let (config, key_expr, value, complete) = parse_args(); + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. 
To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs new file mode 100644 index 0000000000..f689e15b51 --- /dev/null +++ b/examples/examples/z_queryable_shm.rs @@ -0,0 +1,134 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).res().await.unwrap(); + + println!("Creating POSIX SHM backend..."); + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. This code is executed at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + println!("Creating SHM Provider with POSIX backend..."); + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + println!("Declaring Queryable on '{key_expr}'..."); + let queryable = session + .declare_queryable(&key_expr) + .complete(complete) + .res() + .await + .unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(query) = queryable.recv_async().await { + print!( + ">> [Queryable] Received Query '{}' ('{}'", + query.selector(), + query.key_expr().as_str(), + ); + if let Some(payload) = query.payload() { + match payload.deserialize::<&zsliceshm>() { + Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), + Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), + } + } + println!(")"); + + println!("Allocating Shared Memory Buffer..."); + let layout = shared_memory_provider +
.alloc_layout() + .size(1024) + .res() + .unwrap(); + + let mut sbuf = layout + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + sbuf[0..value.len()].copy_from_slice(value.as_bytes()); + + println!( + ">> [Queryable] Responding ('{}': '{}')", + key_expr.as_str(), + value, + ); + query + .reply(key_expr.clone(), sbuf) + .res() + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); + } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] + /// The key expression matching queries to reply to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "Queryable from SharedMemory Rust!")] + /// The value to reply to queries. + value: String, + #[arg(long)] + /// Declare the queryable as complete w.r.t. the key expression. + complete: bool, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { + let args = Args::parse(); + (args.common.into(), args.key, args.value, args.complete) +} diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 45180f598b..2e0f5bf910 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -37,17 +37,16 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { + print!( + ">> [Subscriber] Received {} ('{}': ", + sample.kind(), + sample.key_expr().as_str(), + ); match sample.payload().deserialize::<&zsliceshm>() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } + Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), + Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), } + println!(")"); } // // Try to get a mutable reference to the SHM buffer. 
If this subscriber is the only subscriber diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 1cb4078ee6..d95a1bd417 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -88,6 +88,11 @@ impl Reply { self.result.as_ref() } + /// Gets a mutable borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. + pub fn result_mut(&mut self) -> Result<&mut Sample, &mut Value> { + self.result.as_mut() + } + /// Converts this `Reply` into the its result. Use [`Reply::result`] it you don't want to take ownership. pub fn into_result(self) -> Result { self.result diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index dc13468181..53fea80b10 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -50,18 +50,11 @@ use { }; pub(crate) struct QueryInner { - /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, - /// This Query's selector parameters. pub(crate) parameters: Parameters<'static>, - /// This Query's body. - pub(crate) value: Option, - pub(crate) qid: RequestId, pub(crate) zid: ZenohId, pub(crate) primitives: Arc, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -79,6 +72,9 @@ impl Drop for QueryInner { pub struct Query { pub(crate) inner: Arc, pub(crate) eid: EntityId, + pub(crate) value: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl Query { @@ -106,24 +102,43 @@ impl Query { /// This Query's value. #[inline(always)] pub fn value(&self) -> Option<&Value> { - self.inner.value.as_ref() + self.value.as_ref() + } + + /// This Query's value. + #[inline(always)] + pub fn value_mut(&mut self) -> Option<&mut Value> { + self.value.as_mut() } /// This Query's payload. #[inline(always)] pub fn payload(&self) -> Option<&ZBytes> { - self.inner.value.as_ref().map(|v| &v.payload) + self.value.as_ref().map(|v| &v.payload) + } + + /// This Query's payload.
+ #[inline(always)] + pub fn payload_mut(&mut self) -> Option<&mut ZBytes> { + self.value.as_mut().map(|v| &mut v.payload) } /// This Query's encoding. #[inline(always)] pub fn encoding(&self) -> Option<&Encoding> { - self.inner.value.as_ref().map(|v| &v.encoding) + self.value.as_ref().map(|v| &v.encoding) } + /// This Query's attachment. #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&ZBytes> { - self.inner.attachment.as_ref() + self.attachment.as_ref() + } + + /// This Query's attachment. + #[zenoh_macros::unstable] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() } /// Sends a reply in the form of [`Sample`] to this Query. diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 01fc345c3b..eb70129e55 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1806,10 +1806,6 @@ impl Session { let query_inner = Arc::new(QueryInner { key_expr, parameters: parameters.to_owned().into(), - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), - }), qid, zid, primitives: if local { @@ -1817,13 +1813,17 @@ impl Session { } else { primitives }, - #[cfg(feature = "unstable")] - attachment, }); for (eid, callback) in queryables { callback(Query { inner: query_inner.clone(), eid, + value: body.as_ref().map(|b| Value { + payload: b.payload.clone().into(), + encoding: b.encoding.clone().into(), + }), + #[cfg(feature = "unstable")] + attachment: attachment.clone(), }); } } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index ea084c453b..c13e64f71f 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -462,14 +462,14 @@ impl Primitives for AdminSpace { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), parameters: query.parameters.into(), - value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), qid: msg.id, zid, primitives, - #[cfg(feature = "unstable")] - attachment: 
query.ext_attachment.map(Into::into), }), eid: self.queryable_id, + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), }; for (key, handler) in &self.handlers { From 2dbc20f1f1010ee0e6cdf4fe457ea0ac7c07f9f0 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Sat, 27 Apr 2024 11:57:33 +0300 Subject: [PATCH 305/598] rename payload tests to bytes tests --- zenoh/tests/{payload.rs => bytes.rs} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename zenoh/tests/{payload.rs => bytes.rs} (98%) diff --git a/zenoh/tests/payload.rs b/zenoh/tests/bytes.rs similarity index 98% rename from zenoh/tests/payload.rs rename to zenoh/tests/bytes.rs index 44daadf18c..41e6d14c6e 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/bytes.rs @@ -14,7 +14,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] -fn shm_payload_single_buf() { +fn shm_bytes_single_buf() { use zenoh::prelude::r#async::*; // create an SHM backend... 
From 6f8f6b745b062cb0b66123e36b3125d2a5a2c780 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Sat, 27 Apr 2024 12:23:22 +0300 Subject: [PATCH 306/598] - fix API exports - fix z_payload_shm example --- examples/examples/z_payload_shm.rs | 9 ++------- zenoh/src/lib.rs | 1 + 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_payload_shm.rs index 3b03b80502..4bf45381de 100644 --- a/examples/examples/z_payload_shm.rs +++ b/examples/examples/z_payload_shm.rs @@ -11,16 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; -use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; use zenoh::{ bytes::ZBytes, shm::{ - protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + zsliceshm, zsliceshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, + ZSliceShm, ZSliceShmMut, POSIX_PROTOCOL_ID, }, }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3c011e2439..2a238ea875 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -371,6 +371,7 @@ pub mod shm { pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, From fcb0545a8fff945d12d80140e8f5d7e91053d4f4 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 29 Apr 2024 16:40:20 +0200 Subject: [PATCH 307/598] refactor: replace `AsyncResolve` with `IntoFuture` (#942) * refactor: replace `AsyncResolve` with `IntoFuture`, `SyncResolve` with `Wait`, and deprecate old API * fix: fix shared memory * fix: fix remaining test * fix: fix remaining test * Update 
examples/examples/z_get.rs Co-authored-by: Luca Cominardi * fix: put back (A)SyncResolve in prelude --------- Co-authored-by: Luca Cominardi --- Cargo.lock | 2 +- .../src/pub_sub/bin/z_pub_sub.rs | 15 +- .../src/queryable_get/bin/z_queryable_get.rs | 9 +- commons/zenoh-core/src/lib.rs | 107 +++++++-- commons/zenoh-core/src/macros.rs | 4 +- commons/zenoh-task/src/lib.rs | 6 +- examples/examples/z_alloc_shm.rs | 8 +- examples/examples/z_delete.rs | 8 +- examples/examples/z_forward.rs | 8 +- examples/examples/z_get.rs | 11 +- examples/examples/z_get_liveliness.rs | 5 +- examples/examples/z_info.rs | 10 +- examples/examples/z_liveliness.rs | 15 +- examples/examples/z_ping.rs | 12 +- examples/examples/z_ping_shm.rs | 12 +- examples/examples/z_pong.rs | 10 +- examples/examples/z_pub.rs | 13 +- examples/examples/z_pub_shm.rs | 8 +- examples/examples/z_pub_shm_thr.rs | 15 +- examples/examples/z_pub_thr.rs | 8 +- examples/examples/z_pull.rs | 5 +- examples/examples/z_put.rs | 6 +- examples/examples/z_put_float.rs | 8 +- examples/examples/z_queryable.rs | 10 +- examples/examples/z_scout.rs | 3 +- examples/examples/z_storage.rs | 9 +- examples/examples/z_sub.rs | 6 +- examples/examples/z_sub_liveliness.rs | 5 +- examples/examples/z_sub_shm.rs | 6 +- examples/examples/z_sub_thr.rs | 6 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 4 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 9 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 11 +- plugins/zenoh-plugin-rest/src/lib.rs | 22 +- .../zenoh-plugin-storage-manager/src/lib.rs | 5 +- .../src/replica/align_queryable.rs | 9 +- .../src/replica/aligner.rs | 3 +- .../src/replica/mod.rs | 12 +- .../src/replica/storage.rs | 7 +- .../tests/operations.rs | 16 +- .../tests/wildcard.rs | 16 +- rust-toolchain.toml | 2 +- zenoh-ext/examples/examples/z_member.rs | 4 +- zenoh-ext/examples/examples/z_pub_cache.rs | 8 +- zenoh-ext/examples/examples/z_query_sub.rs | 6 +- 
zenoh-ext/examples/examples/z_view_size.rs | 3 +- zenoh-ext/src/group.rs | 14 +- zenoh-ext/src/publication_cache.rs | 30 +-- zenoh-ext/src/querying_subscriber.rs | 84 ++++--- zenoh-ext/src/session_ext.rs | 6 +- zenoh-ext/src/subscriber_ext.rs | 36 ++- zenoh/src/api/admin.rs | 6 +- zenoh/src/api/builders/publication.rs | 84 +++---- zenoh/src/api/info.rs | 87 +++---- zenoh/src/api/key_expr.rs | 24 +- zenoh/src/api/liveliness.rs | 143 ++++++------ zenoh/src/api/publication.rs | 221 +++++++++--------- zenoh/src/api/query.rs | 36 ++- zenoh/src/api/queryable.rs | 128 +++++----- zenoh/src/api/scouting.rs | 43 ++-- zenoh/src/api/session.rs | 164 ++++++------- zenoh/src/api/subscriber.rs | 85 ++++--- zenoh/src/lib.rs | 23 +- zenoh/src/net/runtime/adminspace.rs | 20 +- zenoh/src/prelude.rs | 18 +- zenoh/tests/acl.rs | 141 ++++++----- zenoh/tests/attachments.rs | 22 +- zenoh/tests/connection_retry.rs | 6 +- zenoh/tests/events.rs | 45 ++-- zenoh/tests/handler.rs | 16 +- zenoh/tests/interceptors.rs | 36 +-- zenoh/tests/liveliness.rs | 25 +- zenoh/tests/matching.rs | 99 ++++---- zenoh/tests/payload.rs | 2 +- zenoh/tests/qos.rs | 18 +- zenoh/tests/routing.rs | 19 +- zenoh/tests/session.rs | 112 +++++---- zenoh/tests/shm.rs | 20 +- zenoh/tests/unicity.rs | 111 ++++----- zenohd/src/main.rs | 3 +- 81 files changed, 1148 insertions(+), 1268 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55de3d50f9..db32920bdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4436,7 +4436,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 0.1.10", "static_assertions", ] diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index d44215cac5..2091f833a1 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -13,7 +13,7 @@ // use 
std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -23,15 +23,11 @@ async fn main() { let sub_key_expr = KeyExpr::try_from("test/valgrind/**").unwrap(); println!("Declaring Publisher on '{pub_key_expr}'..."); - let pub_session = zenoh::open(Config::default()).res().await.unwrap(); - let publisher = pub_session - .declare_publisher(&pub_key_expr) - .res() - .await - .unwrap(); + let pub_session = zenoh::open(Config::default()).await.unwrap(); + let publisher = pub_session.declare_publisher(&pub_key_expr).await.unwrap(); println!("Declaring Subscriber on '{sub_key_expr}'..."); - let sub_session = zenoh::open(Config::default()).res().await.unwrap(); + let sub_session = zenoh::open(Config::default()).await.unwrap(); let _subscriber = sub_session .declare_subscriber(&sub_key_expr) .callback(|sample| { @@ -45,7 +41,6 @@ async fn main() { .unwrap_or_else(|e| format!("{}", e)) ); }) - .res() .await .unwrap(); @@ -53,7 +48,7 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] data"); println!("Putting Data ('{}': '{}')...", &pub_key_expr, buf); - publisher.put(buf).res().await.unwrap(); + publisher.put(buf).await.unwrap(); } tokio::time::sleep(Duration::from_secs(1)).await; diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 364617eb2a..43cb038f94 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -14,7 +14,7 @@ use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -24,7 +24,7 @@ async fn main() { let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); - let queryable_session = 
zenoh::open(Config::default()).res().await.unwrap(); + let queryable_session = zenoh::open(Config::default()).await.unwrap(); let _queryable = queryable_session .declare_queryable(queryable_key_expr.clone()) .callback(move |query| { @@ -33,18 +33,16 @@ async fn main() { zenoh_runtime::ZRuntime::Application.block_in_place(async move { query .reply(queryable_key_expr, query.value().unwrap().payload().clone()) - .res() .await .unwrap(); }); }) .complete(true) - .res() .await .unwrap(); println!("Declaring Get session for '{get_selector}'..."); - let get_session = zenoh::open(Config::default()).res().await.unwrap(); + let get_session = zenoh::open(Config::default()).await.unwrap(); for idx in 0..5 { tokio::time::sleep(Duration::from_secs(1)).await; @@ -53,7 +51,6 @@ async fn main() { .get(&get_selector) .value(idx) .target(QueryTarget::All) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index e15ff1d3bf..19cf3751ff 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -20,7 +20,7 @@ pub use lazy_static::lazy_static; pub mod macros; -use std::future::{Future, Ready}; +use std::future::{Future, IntoFuture, Ready}; // Re-exports after moving ZError/ZResult to zenoh-result pub use zenoh_result::{bail, to_zerror, zerror}; @@ -30,12 +30,34 @@ pub mod zresult { pub use zresult::Error; pub use zresult::ZResult as Result; +/// A resolvable execution, either sync or async pub trait Resolvable { type To: Sized + Send; } +/// Trick used to mark `::IntoFuture` bound as Send +#[doc(hidden)] +pub trait IntoSendFuture: Resolvable { + type IntoFuture: Future + Send; +} + +impl IntoSendFuture for T +where + T: Resolvable + IntoFuture, + T::IntoFuture: Send, +{ + type IntoFuture = T::IntoFuture; +} + +/// Synchronous execution of a resolvable +pub trait Wait: Resolvable { + /// Synchronously execute and wait + fn wait(self) -> Self::To; +} + +#[deprecated 
= "use `.await` directly instead"] pub trait AsyncResolve: Resolvable { - type Future: Future::To> + Send; + type Future: Future + Send; fn res_async(self) -> Self::Future; @@ -47,10 +69,24 @@ pub trait AsyncResolve: Resolvable { } } +#[allow(deprecated)] +impl AsyncResolve for T +where + T: Resolvable + IntoFuture, + T::IntoFuture: Send, +{ + type Future = T::IntoFuture; + + fn res_async(self) -> Self::Future { + self.into_future() + } +} + +#[deprecated = "use `.wait()` instead`"] pub trait SyncResolve: Resolvable { - fn res_sync(self) -> ::To; + fn res_sync(self) -> Self::To; - fn res(self) -> ::To + fn res(self) -> Self::To where Self: Sized, { @@ -58,23 +94,42 @@ pub trait SyncResolve: Resolvable { } } +#[allow(deprecated)] +impl SyncResolve for T +where + T: Wait, +{ + fn res_sync(self) -> Self::To { + self.wait() + } +} + /// Zenoh's trait for resolving builder patterns. /// -/// Builder patterns in Zenoh can be resolved with [`AsyncResolve`] in async context and [`SyncResolve`] in sync context. -/// In both async and sync context calling `.res()` resolves the builder. -/// `.res()` maps to `.res_async()` in async context. -/// `.res()` maps to `.res_sync()` in sync context. -/// We advise to prefer the usage of [`AsyncResolve`] and to use [`SyncResolve`] with caution. -#[must_use = "Resolvables do nothing unless you resolve them using `.res()`."] -pub trait Resolve: Resolvable + SyncResolve + AsyncResolve + Send {} +/// Builder patterns in Zenoh can be resolved by awaiting them, in async context, +/// and [`Wait::wait`] in sync context. 
+/// We advise to prefer the usage of asynchronous execution, and to use synchronous one with caution +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or synchronous `.wait()` method"] +pub trait Resolve: + Resolvable + + Wait + + IntoSendFuture + + IntoFuture::IntoFuture, Output = Output> + + Send +{ +} impl Resolve for T where - T: Resolvable + SyncResolve + AsyncResolve + Send + T: Resolvable + + Wait + + IntoSendFuture + + IntoFuture::IntoFuture, Output = Output> + + Send { } // Closure to wait -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or synchronous `.wait()` method"] pub struct ResolveClosure(C) where To: Sized + Send, @@ -98,30 +153,31 @@ where type To = To; } -impl AsyncResolve for ResolveClosure +impl IntoFuture for ResolveClosure where To: Sized + Send, C: FnOnce() -> To + Send, { - type Future = Ready<::To>; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl SyncResolve for ResolveClosure +impl Wait for ResolveClosure where To: Sized + Send, C: FnOnce() -> To + Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { self.0() } } // Future to wait -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or synchronous `.wait()` method"] pub struct ResolveFuture(F) where To: Sized + Send, @@ -145,24 +201,25 @@ where type To = To; } -impl AsyncResolve for ResolveFuture +impl IntoFuture for ResolveFuture where To: Sized + Send, F: Future + Send, { - type Future = F; + type Output = To; + type IntoFuture = F; - fn res_async(self) -> Self::Future 
{ + fn into_future(self) -> Self::IntoFuture { self.0 } } -impl SyncResolve for ResolveFuture +impl Wait for ResolveFuture where To: Sized + Send, F: Future + Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { zenoh_runtime::ZRuntime::Application.block_in_place(self.0) } } diff --git a/commons/zenoh-core/src/macros.rs b/commons/zenoh-core/src/macros.rs index d8f2f1fdc3..f20f22f41a 100644 --- a/commons/zenoh-core/src/macros.rs +++ b/commons/zenoh-core/src/macros.rs @@ -233,6 +233,8 @@ macro_rules! zcondfeat { #[macro_export] macro_rules! ztimeout { ($f:expr) => { - tokio::time::timeout(TIMEOUT, $f).await.unwrap() + tokio::time::timeout(TIMEOUT, ::core::future::IntoFuture::into_future($f)) + .await + .unwrap() }; } diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index 5f7c3c26d2..d41eb50f34 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -24,7 +24,7 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tokio_util::task::TaskTracker; -use zenoh_core::{ResolveFuture, SyncResolve}; +use zenoh_core::{ResolveFuture, Wait}; use zenoh_runtime::ZRuntime; #[derive(Clone)] @@ -111,7 +111,7 @@ impl TaskController { /// The call blocks until all tasks yield or timeout duration expires. /// Returns 0 in case of success, number of non terminated tasks otherwise. pub fn terminate_all(&self, timeout: Duration) -> usize { - ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).res_sync() + ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).wait() } /// Async version of [`TaskController::terminate_all()`]. @@ -176,7 +176,7 @@ impl TerminatableTask { /// Attempts to terminate the task. /// Returns true if task completed / aborted within timeout duration, false otherwise. 
pub fn terminate(self, timeout: Duration) -> bool { - ResolveFuture::new(async move { self.terminate_async(timeout).await }).res_sync() + ResolveFuture::new(async move { self.terminate_async(timeout).await }).wait() } /// Async version of [`TerminatableTask::terminate()`]. diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 34e1c07058..acff39379c 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -120,9 +120,9 @@ async fn run() -> ZResult<()> { sbuf[0..8].fill(0); // Declare Session and Publisher (common code) - let session = zenoh::open(Config::default()).res_async().await?; - let publisher = session.declare_publisher("my/key/expr").res_async().await?; + let session = zenoh::open(Config::default()).await?; + let publisher = session.declare_publisher("my/key/expr").await?; // Publish SHM buffer - publisher.put(sbuf).res_async().await + publisher.put(sbuf).await } diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 7ee8c75421..4fbb46367c 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,12 +23,12 @@ async fn main() { let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Deleting resources matching '{key_expr}'..."); - session.delete(&key_expr).res().await.unwrap(); + session.delete(&key_expr).await.unwrap(); - session.close().res().await.unwrap(); + session.close().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git 
a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 000b0f97ff..22a6ef4229 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; @@ -24,12 +24,12 @@ async fn main() { let (config, key_expr, forward) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); - let mut subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let mut subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Declaring Publisher on '{forward}'..."); - let publisher = session.declare_publisher(&forward).res().await.unwrap(); + let publisher = session.declare_publisher(&forward).await.unwrap(); println!("Forwarding data from '{key_expr}' to '{forward}'..."); subscriber.forward(publisher).await.unwrap(); } diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 56693d9fa1..6b6326ebcf 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,19 +24,18 @@ async fn main() { let (config, selector, value, target, timeout) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Sending Query '{selector}'..."); let replies = session .get(&selector) - // // By default get receives replies from a FIFO. - // // Uncomment this line to use a ring channel instead. + // // By default get receives replies from a FIFO. 
+ // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. - .with(zenoh::handlers::RingChannel::default()) + // .with(zenoh::handlers::RingChannel::default()) .value(value) .target(target) .timeout(timeout) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index bd8e62a78c..43747697b6 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,14 +24,13 @@ async fn main() { let (config, key_expr, timeout) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Sending Liveliness Query '{key_expr}'..."); let replies = session .liveliness() .get(&key_expr) .timeout(timeout) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index adde62f808..db28970897 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,17 +23,17 @@ async fn main() { let config = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); let info = session.info(); - println!("zid: {}", info.zid().res().await); + println!("zid: {}", info.zid().await); println!( "routers zid: {:?}", - info.routers_zid().res().await.collect::>() + info.routers_zid().await.collect::>() ); println!( "peers zid: {:?}", - 
info.peers_zid().res().await.collect::>() + info.peers_zid().await.collect::>() ); } diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 1c78d3ad24..cee7a29376 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,17 +23,10 @@ async fn main() { let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring LivelinessToken on '{}'...", &key_expr); - let mut token = Some( - session - .liveliness() - .declare_token(&key_expr) - .res() - .await - .unwrap(), - ); + let mut token = Some(session.liveliness().declare_token(&key_expr).await.unwrap()); println!("Press CTRL-C to undeclare LivelinessToken and quit..."); std::thread::park(); @@ -41,7 +34,7 @@ async fn main() { // Use the code below to manually undeclare it if needed if let Some(token) = token.take() { println!("Undeclaring LivelinessToken..."); - token.undeclare().res().await.unwrap(); + token.undeclare().await.unwrap(); }; } diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index af1e9c977d..81181f1a81 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -21,7 +21,7 @@ fn main() { zenoh_util::try_init_log_from_env(); let (config, warmup, size, n, express) = parse_args(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(config).wait().unwrap(); // The key expression to publish data on let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -29,12 +29,12 @@ fn main() { // The key 
expression to wait the response back let key_expr_pong = keyexpr::new("test/pong").unwrap(); - let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let sub = session.declare_subscriber(key_expr_pong).wait().unwrap(); let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) .express(express) - .res() + .wait() .unwrap(); let data: ZBytes = (0usize..size) @@ -49,7 +49,7 @@ fn main() { let now = Instant::now(); while now.elapsed() < warmup { let data = data.clone(); - publisher.put(data).res().unwrap(); + publisher.put(data).wait().unwrap(); let _ = sub.recv(); } @@ -57,7 +57,7 @@ fn main() { for _ in 0..n { let data = data.clone(); let write_time = Instant::now(); - publisher.put(data).res().unwrap(); + publisher.put(data).wait().unwrap(); let _ = sub.recv(); let ts = write_time.elapsed().as_micros(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 98d9bae825..7a7bd61580 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -27,7 +27,7 @@ fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
config.transport.shared_memory.set_enabled(true).unwrap(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(config).wait().unwrap(); // The key expression to publish data on let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -35,11 +35,11 @@ fn main() { // The key expression to wait the response back let key_expr_pong = keyexpr::new("test/pong").unwrap(); - let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let sub = session.declare_subscriber(key_expr_pong).wait().unwrap(); let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .res() + .wait() .unwrap(); let mut samples = Vec::with_capacity(n); @@ -87,14 +87,14 @@ fn main() { println!("Warming up for {warmup:?}..."); let now = Instant::now(); while now.elapsed() < warmup { - publisher.put(buf.clone()).res().unwrap(); + publisher.put(buf.clone()).wait().unwrap(); let _ = sub.recv().unwrap(); } for _ in 0..n { let buf = buf.clone(); let write_time = Instant::now(); - publisher.put(buf).res().unwrap(); + publisher.put(buf).wait().unwrap(); let _ = sub.recv(); let ts = write_time.elapsed().as_micros(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index b2fc075c10..7d7b60b6e9 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -26,7 +26,7 @@ fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
config.transport.shared_memory.set_enabled(true).unwrap(); - let session = zenoh::open(config).res().unwrap().into_arc(); + let session = zenoh::open(config).wait().unwrap().into_arc(); // The key expression to read the data from let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -38,13 +38,13 @@ fn main() { .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) .express(express) - .res() + .wait() .unwrap(); let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.payload().clone()).res().unwrap()) - .res() + .callback(move |sample| publisher.put(sample.payload().clone()).wait().unwrap()) + .wait() .unwrap(); std::thread::park(); } diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 79de0e61d4..7c2c9f2c65 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,22 +24,17 @@ async fn main() { let (config, key_expr, value, attachment) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Publisher on '{key_expr}'..."); - let publisher = session.declare_publisher(&key_expr).res().await.unwrap(); + let publisher = session.declare_publisher(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - publisher - .put(buf) - .attachment(&attachment) - .res() - .await - .unwrap(); + publisher.put(buf).attachment(&attachment).await.unwrap(); } } diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 79527c3e5f..92d19b6b06 100644 --- 
a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; const N: usize = 10; @@ -31,7 +31,7 @@ async fn main() -> Result<(), ZError> { config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Creating POSIX SHM backend..."); // Construct an SHM backend @@ -62,7 +62,7 @@ async fn main() -> Result<(), ZError> { .backend(backend) .res(); - let publisher = session.declare_publisher(&path).res().await.unwrap(); + let publisher = session.declare_publisher(&path).await.unwrap(); println!("Allocating Shared Memory Buffer..."); let layout = shared_memory_provider @@ -95,7 +95,7 @@ async fn main() -> Result<(), ZError> { path, String::from_utf8_lossy(&sbuf[0..slice_len]) ); - publisher.put(sbuf).res().await?; + publisher.put(sbuf).await?; } Ok(()) diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 70a0bf0548..0b94304321 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -26,7 +26,7 @@ async fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
config.transport.shared_memory.set_enabled(true).unwrap(); - let z = zenoh::open(config).res().await.unwrap(); + let z = zenoh::open(config).await.unwrap(); // Construct an SHM backend let backend = { @@ -68,15 +68,18 @@ async fn main() { *b = rand::random::(); } - let publisher = z.declare_publisher("test/thr") - // Make sure to not drop messages because of congestion control - .congestion_control(CongestionControl::Block).res().await.unwrap(); + let publisher = z + .declare_publisher("test/thr") + // Make sure to not drop messages because of congestion control + .congestion_control(CongestionControl::Block) + .await + .unwrap(); let buf: ZSlice = buf.into(); println!("Press CTRL-C to quit..."); loop { - publisher.put(buf.clone()).res().await.unwrap(); + publisher.put(buf.clone()).await.unwrap(); } } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 5625c1b91d..5eb4f9e96e 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,7 +14,7 @@ use clap::Parser; use std::convert::TryInto; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -34,21 +34,21 @@ fn main() { .collect::>() .into(); - let session = zenoh::open(args.common).res().unwrap(); + let session = zenoh::open(args.common).wait().unwrap(); let publisher = session .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) .express(args.express) - .res() + .wait() .unwrap(); println!("Press CTRL-C to quit..."); let mut count: usize = 0; let mut start = std::time::Instant::now(); loop { - publisher.put(data.clone()).res().unwrap(); + publisher.put(data.clone()).wait().unwrap(); if args.print { if count < args.number { diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 349779e574..55f211f111 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use 
zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,13 +24,12 @@ async fn main() { let (config, key_expr, size, interval) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); let subscriber = session .declare_subscriber(&key_expr) .with(RingChannel::new(size)) - .res() .await .unwrap(); diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index bb1274a638..5d68d205f9 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,10 +23,10 @@ async fn main() { let (config, key_expr, value) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Putting Data ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).res().await.unwrap(); + session.put(&key_expr, value).await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index b9c2a4e019..97e4abd69d 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,12 +23,12 @@ async fn main() { let (config, key_expr, value) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Putting Float ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).res().await.unwrap(); + 
session.put(&key_expr, value).await.unwrap(); - session.close().res().await.unwrap(); + session.close().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Debug)] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 47f70c30c3..e24b8e80cb 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,17 +23,16 @@ async fn main() { let (config, key_expr, value, complete) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Queryable on '{key_expr}'..."); let queryable = session .declare_queryable(&key_expr) - // // By default queryable receives queries from a FIFO. - // // Uncomment this line to use a ring channel instead. + // // By default queryable receives queries from a FIFO. + // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. 
// .with(zenoh::handlers::RingChannel::default()) .complete(complete) - .res() .await .unwrap(); @@ -60,7 +59,6 @@ async fn main() { ); query .reply(key_expr.clone(), value.clone()) - .res() .await .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); } diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index 5ac06f37d4..bcd65ffb0e 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -20,7 +20,6 @@ async fn main() { println!("Scouting..."); let receiver = scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) - .res() .await .unwrap(); diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index fd4337535c..2b03e32d06 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -16,7 +16,7 @@ use clap::Parser; use futures::select; use std::collections::HashMap; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -29,16 +29,15 @@ async fn main() { let mut stored: HashMap = HashMap::new(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Declaring Queryable on '{key_expr}'..."); let queryable = session .declare_queryable(&key_expr) .complete(complete) - .res() .await .unwrap(); @@ -60,7 +59,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - 
query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); + query.reply(sample.key_expr().clone(), sample.payload().clone()).await.unwrap(); } } } diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index d2cc370306..156968eb36 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -28,10 +28,10 @@ async fn main() { config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 1df5b9422e..af2c02342d 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,14 +23,13 @@ async fn main() { let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Liveliness Subscriber on '{}'...", &key_expr); let subscriber = session .liveliness() .declare_subscriber(&key_expr) - .res() .await .unwrap(); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 282fd8c776..5f5c77633f 100644 --- a/examples/examples/z_sub_shm.rs +++ 
b/examples/examples/z_sub_shm.rs @@ -13,7 +13,7 @@ // use clap::Parser; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -29,10 +29,10 @@ async fn main() { config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 88105ca8aa..6913a7bf08 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Instant; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; struct Stats { @@ -77,7 +77,7 @@ fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
config.transport.shared_memory.set_enabled(true).unwrap(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(config).wait().unwrap(); let key_expr = "test/thr"; @@ -90,7 +90,7 @@ fn main() { std::process::exit(0) } }) - .res() + .wait() .unwrap(); println!("Press CTRL-C to quit..."); diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 12543b31a1..ea90630523 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -32,7 +32,7 @@ use tokio::io::unix::AsyncFd; use tokio::io::Interest; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; -use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, SyncResolve}; +use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_protocol::transport::BatchSize; use zenoh_runtime::ZRuntime; @@ -331,7 +331,7 @@ impl UnicastPipeListener { fn stop_listening(self) { self.token.cancel(); - let _ = ResolveFuture::new(self.handle).res_sync(); + let _ = ResolveFuture::new(self.handle).wait(); } } diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 5db79b57bd..761f653064 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -29,7 +29,7 @@ //! ``` //! use std::sync::Arc; //! use async_trait::async_trait; -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; //! 
diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index f1deae363d..3c84e039a8 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -28,7 +28,6 @@ use zenoh::runtime::Runtime; use zenoh::sample::Sample; use zenoh::session::SessionDeclarations; use zenoh_core::zlock; -use zenoh_core::AsyncResolve; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; @@ -147,7 +146,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { zenoh_util::try_init_log_from_env(); // create a zenoh Session that shares the same Runtime than zenohd - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); // the HasMap used as a storage by this example of storage plugin let mut stored: HashMap = HashMap::new(); @@ -156,11 +155,11 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // This storage plugin subscribes to the selector and will store in HashMap the received samples debug!("Create Subscriber on {}", selector); - let sub = session.declare_subscriber(&selector).res().await.unwrap(); + let sub = session.declare_subscriber(&selector).await.unwrap(); // This storage plugin declares a Queryable that will reply to queries with the samples stored in the HashMap debug!("Create Queryable on {}", selector); - let queryable = session.declare_queryable(&selector).res().await.unwrap(); + let queryable = session.declare_queryable(&selector).await.unwrap(); // Plugin's event loop, while the flag is true while flag.load(Relaxed) { @@ -178,7 +177,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - 
query.reply_sample(sample.clone()).res().await.unwrap(); + query.reply_sample(sample.clone()).await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 5e5485d0d2..59562391ea 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -14,7 +14,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::Config; -use zenoh::core::{try_init_log_from_env, AsyncResolve}; +use zenoh::core::try_init_log_from_env; use zenoh::key_expr::keyexpr; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; @@ -43,16 +43,16 @@ async fn main() { let value = "Pub from sse server!"; println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Queryable on '{key}'..."); - let queryable = session.declare_queryable(key).res().await.unwrap(); + let queryable = session.declare_queryable(key).await.unwrap(); async_std::task::spawn({ let receiver = queryable.handler().clone(); async move { while let Ok(request) = receiver.recv_async().await { - request.reply(key, HTML).res().await.unwrap(); + request.reply(key, HTML).await.unwrap(); } } }); @@ -63,7 +63,6 @@ async fn main() { let publisher = session .declare_publisher(&event_key) .congestion_control(CongestionControl::Block) - .res() .await .unwrap(); @@ -74,7 +73,7 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { - publisher.put(value).res().await.unwrap(); + publisher.put(value).await.unwrap(); async_std::task::sleep(Duration::from_secs(1)).await; } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 7fe591e3f7..c712a1add6 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -30,7 +30,7 @@ use tide::http::Mime; 
use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; use zenoh::bytes::{StringOrBase64, ZBytes}; -use zenoh::core::{try_init_log_from_env, AsyncResolve}; +use zenoh::core::try_init_log_from_env; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; @@ -350,13 +350,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result, String)>) -> tide::Result, String)>) -> tide::Result, String)>) -> tide::Result { if raw { Ok(to_raw_response(receiver).await) @@ -470,8 +464,8 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result session.put(&key_expr, bytes).encoding(encoding).res().await, - SampleKind::Delete => session.delete(&key_expr).res().await, + SampleKind::Put => session.put(&key_expr, bytes).encoding(encoding).await, + SampleKind::Delete => session.delete(&key_expr).await, }; match res { Ok(_) => Ok(Response::new(StatusCode::Ok)), @@ -497,7 +491,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { try_init_log_from_env(); let zid = runtime.zid().to_string(); - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); let mut app = Server::with_state((Arc::new(session), zid)); app.with( diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index e5ca51c5ef..8818d44688 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -29,7 +29,6 @@ use std::sync::Mutex; use storages_mgt::StorageMessage; use zenoh::core::try_init_log_from_env; use zenoh::core::Result as ZResult; -use zenoh::core::SyncResolve; use zenoh::internal::zlock; use zenoh::internal::LibLoader; use zenoh::key_expr::keyexpr; @@ -51,6 +50,8 @@ use zenoh_plugin_trait::PluginStatusRec; mod backends_mgt; use backends_mgt::*; +use zenoh::prelude::Wait; + mod memory_backend; mod replica; mod 
storages_mgt; @@ -117,7 +118,7 @@ impl StorageRuntimeInner { let plugins_manager = PluginsManager::dynamic(lib_loader.clone(), BACKEND_LIB_PREFIX) .declare_static_plugin::(true); - let session = Arc::new(zenoh::session::init(runtime.clone()).res_sync()?); + let session = Arc::new(zenoh::session::init(runtime.clone()).wait()?); // After this moment result should be only Ok. Failure of loading of one voulme or storage should not affect others. diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index c24639b6ca..694e259a18 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,7 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; pub struct AlignQueryable { session: Arc, @@ -68,7 +68,6 @@ impl AlignQueryable { .session .declare_queryable(&self.digest_key) .complete(true) // This queryable is meant to have all the history - .res() .await .unwrap(); @@ -97,7 +96,6 @@ impl AlignQueryable { query.key_expr().clone(), serde_json::to_string(&(i, c)).unwrap(), ) - .res() .await .unwrap(); } @@ -107,7 +105,6 @@ impl AlignQueryable { query.key_expr().clone(), serde_json::to_string(&(i, c)).unwrap(), ) - .res() .await .unwrap(); } @@ -117,7 +114,6 @@ impl AlignQueryable { query.key_expr().clone(), serde_json::to_string(&(i, c)).unwrap(), ) - .res() .await .unwrap(); } @@ -126,7 +122,6 @@ impl AlignQueryable { .reply(k, v.payload().clone()) .encoding(v.encoding().clone()) .timestamp(ts) - .res() .await .unwrap(); } @@ -226,7 +221,7 @@ impl AlignQueryable { impl AlignQueryable { async fn get_entry(&self, logentry: &LogEntry) -> Option { // get corresponding key from log - let replies = self.session.get(&logentry.key).res().await.unwrap(); + let replies 
= self.session.get(&logentry.key).await.unwrap(); if let Ok(reply) = replies.recv_async().await { match reply.into_result() { Ok(sample) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3a9fd00558..46ccdc2935 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,7 +18,7 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; pub struct Aligner { session: Arc, @@ -322,7 +322,6 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res() .await { Ok(replies) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index b951b23336..c9d9e03bcf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,7 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; pub mod align_queryable; @@ -206,7 +206,6 @@ impl Replica { .session .declare_subscriber(&digest_key) .allowed_origin(Locality::Remote) - .res() .await .unwrap(); loop { @@ -265,12 +264,7 @@ impl Replica { .unwrap(); tracing::debug!("[DIGEST_PUB] Declaring Publisher on '{}'...", digest_key); - let publisher = self - .session - .declare_publisher(digest_key) - .res() - .await - .unwrap(); + let publisher = self.session.declare_publisher(digest_key).await.unwrap(); // Ensure digest gets published every interval, accounting for // time it takes to publish. 
@@ -287,7 +281,7 @@ impl Replica { drop(digest); tracing::trace!("[DIGEST_PUB] Putting Digest: {} ...", digest_json); - match publisher.put(digest_json).res().await { + match publisher.put(digest_json).await { Ok(()) => {} Err(e) => tracing::error!("[DIGEST_PUB] Digest publication failed: {}", e), } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index ba078c0012..476893539e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,7 +23,6 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::core::AsyncResolve; use zenoh::internal::bail; use zenoh::internal::{zenoh_home, Timed, TimedEvent, Timer}; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; @@ -144,7 +143,7 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { + let storage_sub = match self.session.declare_subscriber(&self.key_expr).await { Ok(storage_sub) => storage_sub, Err(e) => { tracing::error!("Error starting storage '{}': {}", self.name, e); @@ -157,7 +156,6 @@ impl StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res() .await { Ok(storage_queryable) => storage_queryable, @@ -522,7 +520,6 @@ impl StorageService { .reply(key.clone(), entry.value.payload().clone()) .encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) - .res() .await { tracing::warn!( @@ -556,7 +553,6 @@ impl StorageService { .reply(q.key_expr().clone(), entry.value.payload().clone()) .encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) - .res() .await { tracing::warn!( @@ -644,7 +640,6 @@ impl StorageService { .get(Selector::new(&self.key_expr, "_time=[..]")) .target(QueryTarget::All) 
.consolidation(ConsolidationMode::None) - .res() .await { Ok(replies) => replies, diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 0678431b7e..b5384e13be 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -21,29 +21,23 @@ use std::thread::sleep; use async_std::task; use zenoh::internal::zasync_executor_init; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... - session.put(key_expr, value).res().await.unwrap(); + session.put(key_expr, value).await.unwrap(); } async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... 
- session.delete(key_expr).res().await.unwrap(); + session.delete(key_expr).await.unwrap(); } async fn get_data(session: &Session, key_expr: &str) -> Vec { - let replies: Vec = session - .get(key_expr) - .res() - .await - .unwrap() - .into_iter() - .collect(); + let replies: Vec = session.get(key_expr).await.unwrap().into_iter().collect(); println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { @@ -80,7 +74,7 @@ async fn test_updates_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); sleep(std::time::Duration::from_secs(1)); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 72fa62f3ca..bd38e834d7 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -22,29 +22,23 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; use zenoh::internal::zasync_executor_init; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... - session.put(key_expr, value).res().await.unwrap(); + session.put(key_expr, value).await.unwrap(); } async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... 
- session.delete(key_expr).res().await.unwrap(); + session.delete(key_expr).await.unwrap(); } async fn get_data(session: &Session, key_expr: &str) -> Vec { - let replies: Vec = session - .get(key_expr) - .res() - .await - .unwrap() - .into_iter() - .collect(); + let replies: Vec = session.get(key_expr).await.unwrap().into_iter().collect(); println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { @@ -81,7 +75,7 @@ async fn test_wild_card_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); sleep(std::time::Duration::from_secs(1)); // put *, ts: 1 diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 743f7cd993..b7eadd649b 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.72.0" +channel = "1.72.0" \ No newline at end of file diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 2dc1a242c9..35513b1b56 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -14,13 +14,13 @@ use futures::StreamExt; use std::sync::Arc; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_ext::group::*; #[tokio::main] async fn main() { zenoh_util::try_init_log_from_env(); - let z = Arc::new(zenoh::open(Config::default()).res().await.unwrap()); + let z = Arc::new(zenoh::open(Config::default()).await.unwrap()); let member = Member::new(z.zid().to_string()) .unwrap() .lease(Duration::from_secs(3)); diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 58eb7962c9..09c888cb0b 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -14,7 +14,7 @@ use 
clap::{arg, Parser}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; @@ -26,7 +26,7 @@ async fn main() { let (config, key_expr, value, history, prefix, complete) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring PublicationCache on {}", &key_expr); let mut publication_cache_builder = session @@ -36,14 +36,14 @@ async fn main() { if let Some(prefix) = prefix { publication_cache_builder = publication_cache_builder.queryable_prefix(prefix); } - let _publication_cache = publication_cache_builder.res().await.unwrap(); + let _publication_cache = publication_cache_builder.await.unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Put Data ('{}': '{}')", &key_expr, buf); - session.put(&key_expr, buf).res().await.unwrap(); + session.put(&key_expr, buf).await.unwrap(); } } diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index b34a5771a7..a735ecec66 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -14,7 +14,7 @@ use clap::arg; use clap::Parser; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; @@ -26,7 +26,7 @@ async fn main() { let (config, key_expr, query) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!( "Declaring QueryingSubscriber on {} with an initial query on {}", @@ -39,14 +39,12 @@ async fn main() { .querying() .query_selector(&selector) .query_accept_replies(ReplyKeyExpr::Any) 
- .res() .await .unwrap() } else { session .declare_subscriber(key_expr) .querying() - .res() .await .unwrap() }; diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index 66e79cd301..52e78790bb 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -15,7 +15,6 @@ use clap::{arg, Parser}; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; use zenoh_ext::group::*; use zenoh_ext_examples::CommonArgs; @@ -25,7 +24,7 @@ async fn main() { let (config, group_name, id, size, timeout) = parse_args(); - let z = Arc::new(zenoh::open(config).res().await.unwrap()); + let z = Arc::new(zenoh::open(config).await.unwrap()); let member_id = id.unwrap_or_else(|| z.zid().to_string()); let member = Member::new(member_id.as_str()) .unwrap() diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 1bf37f365c..d764e5ed9c 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,7 +25,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use zenoh::internal::{bail, Condition, TaskController}; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; @@ -177,7 +177,7 @@ async fn keep_alive_task(state: Arc) { loop { tokio::time::sleep(period).await; tracing::trace!("Sending Keep Alive for: {}", &state.local_member.mid); - let _ = state.group_publisher.put(buf.clone()).res().await; + let _ = state.group_publisher.put(buf.clone()).await; } } @@ -219,18 +219,17 @@ async fn query_handler(z: Arc, state: Arc) { .unwrap(); tracing::debug!("Started query handler for: {}", &qres); let buf = bincode::serialize(&state.local_member).unwrap(); - let queryable = z.declare_queryable(&qres).res().await.unwrap(); + let queryable = z.declare_queryable(&qres).await.unwrap(); while let Ok(query) = 
queryable.recv_async().await { tracing::trace!("Serving query for: {}", &qres); - query.reply(qres.clone(), buf.clone()).res().await.unwrap(); + query.reply(qres.clone(), buf.clone()).await.unwrap(); } } async fn net_event_handler(z: Arc, state: Arc) { let sub = z .declare_subscriber(state.group_publisher.key_expr()) - .res() .await .unwrap(); while let Ok(s) = sub.recv_async().await { @@ -288,7 +287,7 @@ async fn net_event_handler(z: Arc, state: Arc) { // @TODO: we could also send this member info let qc = ConsolidationMode::None; tracing::trace!("Issuing Query for {}", &qres); - let receiver = z.get(&qres).consolidation(qc).res().await.unwrap(); + let receiver = z.get(&qres).consolidation(qc).await.unwrap(); while let Ok(reply) = receiver.recv_async().await { match reply.result() { @@ -358,7 +357,6 @@ impl Group { let publisher = z .declare_publisher(event_expr) .priority(with.priority) - .res() .await .unwrap(); let state = Arc::new(GroupState { @@ -375,7 +373,7 @@ impl Group { tracing::debug!("Sending Join Message for local member: {:?}", &with); let join_evt = GroupNetEvent::Join(JoinEvent { member: with }); let buf = bincode::serialize(&join_evt).unwrap(); - let _ = state.group_publisher.put(buf).res().await; + let _ = state.group_publisher.put(buf).await; let task_controller = TaskController::default(); // If the liveliness is manual it is the user who has to assert it. 
diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 7080b44ac4..11fb8fb72a 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -13,12 +13,13 @@ // use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; -use std::future::Ready; +use std::future::{IntoFuture, Ready}; use std::time::Duration; use zenoh::core::Error; -use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::core::{Resolvable, Resolve}; use zenoh::internal::{ResolveFuture, TerminatableTask}; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; +use zenoh::prelude::Wait; use zenoh::queryable::{Query, Queryable}; use zenoh::runtime::ZRuntime; use zenoh::sample::{Locality, Sample}; @@ -96,17 +97,18 @@ impl<'a> Resolvable for PublicationCacheBuilder<'a, '_, '_> { type To = ZResult>; } -impl SyncResolve for PublicationCacheBuilder<'_, '_, '_> { - fn res_sync(self) -> ::To { +impl Wait for PublicationCacheBuilder<'_, '_, '_> { + fn wait(self) -> ::To { PublicationCache::new(self) } } -impl<'a> AsyncResolve for PublicationCacheBuilder<'a, '_, '_> { - type Future = Ready; +impl<'a> IntoFuture for PublicationCacheBuilder<'a, '_, '_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -149,7 +151,7 @@ impl<'a> PublicationCache<'a> { .session .declare_subscriber(&key_expr) .allowed_origin(Locality::SessionLocal) - .res_sync()?; + .wait()?; // declare the queryable which returns the cached publications let mut queryable = conf.session.declare_queryable(&queryable_key_expr); @@ -159,7 +161,7 @@ impl<'a> PublicationCache<'a> { if let Some(complete) = conf.complete { queryable = queryable.complete(complete); } - let queryable = queryable.res_sync()?; + let queryable = queryable.wait()?; // take local ownership of stuff to be moved into 
task let sub_recv = local_sub.handler().clone(); @@ -215,7 +217,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).await { tracing::warn!("Error replying to query: {}", e); } } @@ -229,7 +231,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).await { tracing::warn!("Error replying to query: {}", e); } } @@ -261,8 +263,8 @@ impl<'a> PublicationCache<'a> { local_sub, task, } = self; - _queryable.undeclare().res_async().await?; - local_sub.undeclare().res_async().await?; + _queryable.undeclare().await?; + local_sub.undeclare().await?; task.terminate(Duration::from_secs(10)); Ok(()) }) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 35eb9afe46..6febef7395 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -13,14 +13,15 @@ // use std::collections::{btree_map, BTreeMap, VecDeque}; use std::convert::TryInto; -use std::future::Ready; +use std::future::{IntoFuture, Ready}; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; -use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::core::{Resolvable, Resolve}; use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::internal::zlock; use zenoh::key_expr::KeyExpr; +use zenoh::prelude::Wait; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; use zenoh::selector::Selector; @@ -223,13 +224,13 @@ where type To = ZResult>; } -impl SyncResolve for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> +impl Wait for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> where KeySpace: Into + Clone, Handler: IntoHandler<'static, Sample> + Send, 
Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session.clone(); let key_expr = self.key_expr?; let key_space = self.key_space.clone().into(); @@ -257,31 +258,32 @@ where .consolidation(query_consolidation) .accept_replies(query_accept_replies) .timeout(query_timeout) - .res_sync(), + .wait(), crate::KeySpace::Liveliness => session .liveliness() .get(key_expr) .callback(cb) .timeout(query_timeout) - .res_sync(), + .wait(), }, handler: self.handler, phantom: std::marker::PhantomData, } - .res_sync() + .wait() } } -impl<'a, KeySpace, Handler> AsyncResolve for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> +impl<'a, KeySpace, Handler> IntoFuture for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where KeySpace: Into + Clone, Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -551,14 +553,14 @@ impl< Handler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, - > SyncResolve for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> + > Wait for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { FetchingSubscriber::new(self.with_static_keys()) } } @@ -569,17 +571,18 @@ impl< Handler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, - > AsyncResolve for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> + > IntoFuture for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, Handler: IntoHandler<'static, Sample> + Send, 
Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -595,20 +598,18 @@ where /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// use zenoh_ext::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { -/// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) -/// .res_sync() +/// .wait() /// }) -/// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -689,13 +690,13 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { .callback(sub_callback) .reliability(conf.reliability) .allowed_origin(conf.origin) - .res_sync()?, + .wait()?, crate::KeySpace::Liveliness => conf .session .liveliness() .declare_subscriber(&key_expr) .callback(sub_callback) - .res_sync()?, + .wait()?, }; let fetch_subscriber = FetchingSubscriber { @@ -732,10 +733,10 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -743,9 +744,8 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// @@ -756,9 +756,8 @@ impl<'a, 
Handler> FetchingSubscriber<'a, Handler> { /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// # } @@ -814,10 +813,10 @@ impl Drop for RepliesHandler { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// # use zenoh::prelude::r#async::*; +/// # use zenoh::prelude::*; /// # use zenoh_ext::*; /// # -/// # let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # let session = zenoh::open(config::peer()).await.unwrap(); /// # let mut fetching_subscriber = session /// # .declare_subscriber("key/expr") /// # .fetching( |cb| { @@ -825,9 +824,8 @@ impl Drop for RepliesHandler { /// # session /// # .get("key/expr") /// # .callback(cb) -/// # .res_sync() +/// # .wait() /// # }) -/// # .res() /// # .await /// # .unwrap(); /// # @@ -837,9 +835,8 @@ impl Drop for RepliesHandler { /// session /// .get("key/expr") /// .callback(cb) -/// .res_sync() +/// .wait() /// }) -/// .res() /// .await /// .unwrap(); /// # } @@ -865,26 +862,27 @@ where type To = ZResult<()>; } -impl) -> ZResult<()>, TryIntoSample> - SyncResolve for FetchBuilder +impl) -> ZResult<()>, TryIntoSample> Wait + for FetchBuilder where TryIntoSample: ExtractSample, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let handler = register_handler(self.state, self.callback); run_fetch(self.fetch, handler) } } impl) -> ZResult<()>, TryIntoSample> - AsyncResolve for FetchBuilder + IntoFuture for FetchBuilder where TryIntoSample: ExtractSample, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 3f23239b29..d005cafc86 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -62,14 +62,14 @@ impl<'s> SessionExt<'s, 'static> 
for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh::config::ModeDependentValue::Unique; /// use zenoh_ext::SessionExt; /// /// let mut config = config::default(); /// config.timestamping.set_enabled(Some(Unique(true))); - /// let session = zenoh::open(config).res().await.unwrap().into_arc(); - /// let publication_cache = session.declare_publication_cache("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config).await.unwrap().into_arc(); + /// let publication_cache = session.declare_publication_cache("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// publication_cache.key_expr(); /// }).await; diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 3176745c95..8c3b1239b6 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -60,10 +60,10 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -71,9 +71,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -106,14 +105,13 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let 
subscriber = session /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -141,10 +139,10 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -152,9 +150,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -199,14 +196,13 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -254,10 +250,10 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") @@ -267,9 +263,8 @@ impl<'a, 'b, Handler> 
SubscriberBuilderExt<'a, 'b, Handler> /// .liveliness() /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -315,15 +310,14 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index c221d7f27c..e720fde1c3 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -25,7 +25,7 @@ use std::{ hash::{Hash, Hasher}, sync::Arc, }; -use zenoh_core::{Result as ZResult, SyncResolve}; +use zenoh_core::{Result as ZResult, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ @@ -72,7 +72,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { match ZBytes::try_from(value) { Ok(zbuf) => { - let _ = query.reply(key_expr, zbuf).res_sync(); + let _ = query.reply(key_expr, zbuf).wait(); } Err(e) => tracing::debug!("Admin query error: {}", e), } @@ -89,7 +89,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(link) { match ZBytes::try_from(value) { Ok(zbuf) => { - let _ = query.reply(key_expr, zbuf).res_sync(); + let _ = query.reply(key_expr, zbuf).wait(); } Err(e) => tracing::debug!("Admin query error: {}", e), } diff --git a/zenoh/src/api/builders/publication.rs 
b/zenoh/src/api/builders/publication.rs index 711cb063f6..5285825b29 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -1,5 +1,3 @@ -use std::future::Ready; - // // Copyright (c) 2024 ZettaScale Technology // @@ -27,7 +25,8 @@ use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::value::Value; use crate::api::{encoding::Encoding, publication::Publisher}; -use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use std::future::{IntoFuture, Ready}; +use zenoh_core::{Resolvable, Result as ZResult, Wait}; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::Mapping; @@ -57,14 +56,13 @@ pub struct PublicationBuilderDelete; /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// session /// .put("key/expression", "payload") /// .encoding(Encoding::TEXT_PLAIN) /// .congestion_control(CongestionControl::Block) -/// .res() /// .await /// .unwrap(); /// # } @@ -179,9 +177,9 @@ impl Resolvable for PublicationBuilder { type To = ZResult<()>; } -impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { +impl Wait for PublicationBuilder, PublicationBuilderPut> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; publisher.resolve_put( self.kind.payload, @@ -196,9 +194,9 @@ impl SyncResolve for PublicationBuilder, PublicationBui } } -impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { +impl Wait for PublicationBuilder, PublicationBuilderDelete> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; publisher.resolve_put( ZBytes::empty(), @@ -213,19 +211,21 @@ impl SyncResolve for 
PublicationBuilder, PublicationBui } } -impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { - type Future = Ready; +impl IntoFuture for PublicationBuilder, PublicationBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { - type Future = Ready; +impl IntoFuture for PublicationBuilder, PublicationBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -235,13 +235,12 @@ impl AsyncResolve for PublicationBuilder, PublicationBu /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let publisher = session /// .declare_publisher("key/expression") /// .congestion_control(CongestionControl::Block) -/// .res() /// .await /// .unwrap(); /// # } @@ -327,12 +326,12 @@ impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { type To = ZResult>; } -impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { - fn res_sync(self) -> ::To { +impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { + fn wait(self) -> ::To { let mut key_expr = self.key_expr?; if !key_expr.is_fully_optimized(&self.session) { let session_id = self.session.id; - let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); + let expr_id = self.session.declare_prefix(key_expr.as_str()).wait(); let prefix_len = key_expr .len() .try_into() @@ -362,7 +361,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { } self.session .declare_publication_intent(key_expr.clone()) - 
.res_sync()?; + .wait()?; #[cfg(feature = "unstable")] let eid = self.session.runtime.next_id(); let publisher = Publisher { @@ -380,16 +379,17 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { } } -impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { - type Future = Ready; +impl<'a, 'b> IntoFuture for PublisherBuilder<'a, 'b> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - fn res_sync(self) -> ::To { +impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + fn wait(self) -> ::To { self.publisher.resolve_put( self.kind.payload, SampleKind::Put, @@ -403,8 +403,8 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - fn res_sync(self) -> ::To { +impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + fn wait(self) -> ::To { self.publisher.resolve_put( ZBytes::empty(), SampleKind::Delete, @@ -418,18 +418,20 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete } } -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - type Future = Ready; +impl IntoFuture for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - type Future = Ready; +impl IntoFuture for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn 
res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index dbcad9c50c..a6f8ff1629 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -14,8 +14,8 @@ //! Tools to access information about the current zenoh [`Session`](crate::Session). use super::session::SessionRef; -use std::future::Ready; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use std::future::{IntoFuture, Ready}; +use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::core::{WhatAmI, ZenohId}; /// A builder retuned by [`SessionInfo::zid()`](SessionInfo::zid) that allows @@ -25,10 +25,10 @@ use zenoh_protocol::core::{WhatAmI, ZenohId}; /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let zid = session.info().zid().res().await; +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let zid = session.info().zid().await; /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -41,17 +41,18 @@ impl<'a> Resolvable for ZidBuilder<'a> { type To = ZenohId; } -impl<'a> SyncResolve for ZidBuilder<'a> { - fn res_sync(self) -> Self::To { +impl<'a> Wait for ZidBuilder<'a> { + fn wait(self) -> Self::To { self.session.runtime.zid() } } -impl<'a> AsyncResolve for ZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for ZidBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -63,10 +64,10 @@ impl<'a> AsyncResolve for ZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use 
zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let mut routers_zid = session.info().routers_zid().res().await; +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } /// ``` @@ -80,8 +81,8 @@ impl<'a> Resolvable for RoutersZidBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> SyncResolve for RoutersZidBuilder<'a> { - fn res_sync(self) -> Self::To { +impl<'a> Wait for RoutersZidBuilder<'a> { + fn wait(self) -> Self::To { Box::new( zenoh_runtime::ZRuntime::Application .block_in_place(self.session.runtime.manager().get_transports_unicast()) @@ -96,11 +97,12 @@ impl<'a> SyncResolve for RoutersZidBuilder<'a> { } } -impl<'a> AsyncResolve for RoutersZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for RoutersZidBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -111,11 +113,11 @@ impl<'a> AsyncResolve for RoutersZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let zid = session.info().zid().res().await; -/// let mut peers_zid = session.info().peers_zid().res().await; +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let zid = session.info().zid().await; +/// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` @@ -129,8 +131,8 @@ impl<'a> Resolvable for PeersZidBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> SyncResolve for PeersZidBuilder<'a> { - fn res_sync(self) -> ::To { +impl<'a> Wait for 
PeersZidBuilder<'a> { + fn wait(self) -> ::To { Box::new( zenoh_runtime::ZRuntime::Application .block_in_place(self.session.runtime.manager().get_transports_unicast()) @@ -145,11 +147,12 @@ impl<'a> SyncResolve for PeersZidBuilder<'a> { } } -impl<'a> AsyncResolve for PeersZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for PeersZidBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -160,11 +163,11 @@ impl<'a> AsyncResolve for PeersZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let info = session.info(); -/// let zid = info.zid().res().await; +/// let zid = info.zid().await; /// # } /// ``` pub struct SessionInfo<'a> { @@ -178,10 +181,10 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let zid = session.info().zid().res().await; + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let zid = session.info().zid().await; /// # } /// ``` pub fn zid(&self) -> ZidBuilder<'_> { @@ -197,10 +200,10 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut routers_zid = session.info().routers_zid().res().await; + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } 
/// ``` @@ -216,10 +219,10 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut peers_zid = session.info().peers_zid().res().await; + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 774cf28790..20dcf9cbee 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -14,12 +14,13 @@ use super::session::{Session, Undeclarable}; use crate::net::primitives::Primitives; +use std::future::IntoFuture; use std::{ convert::{TryFrom, TryInto}, future::Ready, str::FromStr, }; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, @@ -562,11 +563,11 @@ impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); -/// session.undeclare(key_expr).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); +/// session.undeclare(key_expr).await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -579,8 +580,8 @@ impl Resolvable for KeyExprUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for 
KeyExprUndeclaration<'_> { - fn res_sync(self) -> ::To { +impl Wait for KeyExprUndeclaration<'_> { + fn wait(self) -> ::To { let KeyExprUndeclaration { session, expr } = self; let expr_id = match &expr.0 { KeyExprInner::Wire { @@ -629,11 +630,12 @@ impl SyncResolve for KeyExprUndeclaration<'_> { } } -impl AsyncResolve for KeyExprUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for KeyExprUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 761704d7d2..f7235426c3 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -21,10 +21,11 @@ use super::{ subscriber::{Subscriber, SubscriberInner}, Id, }; +use std::future::IntoFuture; use std::{convert::TryInto, future::Ready, sync::Arc, time::Duration}; use zenoh_config::unwrap_or_default; -use zenoh_core::Resolve; -use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use zenoh_core::{Resolvable, Result as ZResult}; +use zenoh_core::{Resolve, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; @@ -55,13 +56,12 @@ lazy_static::lazy_static!( /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -83,13 +83,12 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = 
zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -119,10 +118,10 @@ impl<'a> Liveliness<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.liveliness().declare_subscriber("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let subscriber = session.liveliness().declare_subscriber("key/expression").await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { /// match sample.kind() { /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), @@ -157,10 +156,10 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let replies = session.liveliness().get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// if let Ok(sample) = reply.result() { /// println!(">> Liveliness token {}", sample.key_expr()); @@ -197,13 +196,12 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -222,9 +220,9 @@ 
impl<'a> Resolvable for LivelinessTokenBuilder<'a, '_> { } #[zenoh_macros::unstable] -impl SyncResolve for LivelinessTokenBuilder<'_, '_> { +impl Wait for LivelinessTokenBuilder<'_, '_> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session; let key_expr = self.key_expr?.into_owned(); session @@ -238,11 +236,12 @@ impl SyncResolve for LivelinessTokenBuilder<'_, '_> { } #[zenoh_macros::unstable] -impl AsyncResolve for LivelinessTokenBuilder<'_, '_> { - type Future = Ready; +impl IntoFuture for LivelinessTokenBuilder<'_, '_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -272,13 +271,12 @@ pub(crate) struct LivelinessTokenState { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -297,17 +295,16 @@ pub struct LivelinessToken<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// -/// liveliness.undeclare().res().await.unwrap(); +/// liveliness.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -322,19 +319,20 @@ impl Resolvable for LivelinessTokenUndeclaration<'_> { } 
#[zenoh_macros::unstable] -impl SyncResolve for LivelinessTokenUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for LivelinessTokenUndeclaration<'_> { + fn wait(mut self) -> ::To { self.token.alive = false; self.token.session.undeclare_liveliness(self.token.state.id) } } #[zenoh_macros::unstable] -impl<'a> AsyncResolve for LivelinessTokenUndeclaration<'a> { - type Future = Ready; +impl<'a> IntoFuture for LivelinessTokenUndeclaration<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -350,17 +348,16 @@ impl<'a> LivelinessToken<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// - /// liveliness.undeclare().res().await.unwrap(); + /// liveliness.undeclare().await.unwrap(); /// # } /// ``` #[inline] @@ -391,13 +388,12 @@ impl Drop for LivelinessToken<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .res() /// .await /// .unwrap(); /// # } @@ -419,13 +415,12 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = 
zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -460,14 +455,13 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -490,13 +484,12 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -533,13 +526,13 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> SyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for LivelinessSubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { #[zenoh_macros::unstable] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; let (callback, handler) = self.handler.into_handler(); @@ -563,16 +556,17 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> AsyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for 
LivelinessSubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; #[zenoh_macros::unstable] - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -583,13 +577,12 @@ where /// # #[tokio::main] /// # async fn main() { /// # use std::convert::TryFrom; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let tokens = session /// .liveliness() /// .get("key/expression") -/// .res() /// .await /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { @@ -616,14 +609,13 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .liveliness() /// .get("key/expression") /// .callback(|reply| { println!("Received {:?}", reply.result()); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -656,15 +648,14 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .liveliness() /// .get("key/expression") /// .callback_mut(move |reply| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -686,14 +677,13 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` 
/// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let replies = session /// .liveliness() /// .get("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { @@ -738,12 +728,12 @@ where type To = ZResult; } -impl SyncResolve for LivelinessGetBuilder<'_, '_, Handler> +impl Wait for LivelinessGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.session .query( @@ -764,14 +754,15 @@ where } } -impl AsyncResolve for LivelinessGetBuilder<'_, '_, Handler> +impl IntoFuture for LivelinessGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index f41c35b720..518ddc4d1b 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -25,13 +25,14 @@ use super::{ }; use crate::net::primitives::Primitives; use futures::Sink; +use std::future::IntoFuture; use std::{ convert::TryFrom, future::Ready, pin::Pin, task::{Context, Poll}, }; -use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_core::{zread, Resolvable, Resolve, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{ core::CongestionControl, @@ -86,11 +87,11 @@ impl std::fmt::Debug for PublisherRef<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use 
zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// publisher.put("value").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// publisher.put("value").await.unwrap(); /// # } /// ``` /// @@ -101,11 +102,11 @@ impl std::fmt::Debug for PublisherRef<'_> { /// # #[tokio::main] /// # async fn main() { /// use futures::StreamExt; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let mut subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); -/// let publisher = session.declare_publisher("another/key/expression").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let mut subscriber = session.declare_subscriber("key/expression").await.unwrap(); +/// let publisher = session.declare_publisher("another/key/expression").await.unwrap(); /// subscriber.stream().map(Ok).forward(publisher).await.unwrap(); /// # } /// ``` @@ -128,11 +129,10 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); /// let publisher_id = publisher.id(); @@ -184,11 +184,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = 
zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -212,11 +212,11 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` #[inline] @@ -244,11 +244,11 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.delete().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.delete().await.unwrap(); /// # } /// ``` pub fn delete(&self) -> PublisherDeleteBuilder<'_> { @@ -272,13 +272,12 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use 
zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() - /// .res() /// .await /// .unwrap() /// .matching_subscribers(); @@ -301,11 +300,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -329,11 +328,11 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.undeclare().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.undeclare().await.unwrap(); /// # } /// ``` pub fn undeclare(self) -> impl Resolve> + 'a { @@ -353,11 +352,11 @@ 
impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); -/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); +/// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -376,11 +375,11 @@ pub trait PublisherDeclarations { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -403,11 +402,11 @@ impl PublisherDeclarations for std::sync::Arc> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = 
publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -441,11 +440,11 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// publisher.undeclare().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// publisher.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -457,24 +456,25 @@ impl Resolvable for PublisherUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for PublisherUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for PublisherUndeclaration<'_> { + fn wait(mut self) -> ::To { let Publisher { session, key_expr, .. 
} = &self.publisher; session .undeclare_publication_intent(key_expr.clone()) - .res_sync()?; + .wait()?; self.publisher.key_expr = unsafe { keyexpr::from_str_unchecked("") }.into(); Ok(()) } } -impl AsyncResolve for PublisherUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for PublisherUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -484,7 +484,7 @@ impl Drop for Publisher<'_> { let _ = self .session .undeclare_publication_intent(self.key_expr.clone()) - .res_sync(); + .wait(); } } } @@ -726,11 +726,11 @@ impl TryFrom for Priority { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// let matching_status = publisher.matching_status().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// let matching_status = publisher.matching_status().await.unwrap(); /// # } /// ``` #[zenoh_macros::unstable] @@ -747,13 +747,12 @@ impl MatchingStatus { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() - /// .res() /// .await /// .unwrap() /// 
.matching_subscribers(); @@ -780,10 +779,10 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .callback(|matching_status| { @@ -793,7 +792,6 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// println!("Publisher has NO MORE matching subscribers."); /// } /// }) - /// .res() /// .await /// .unwrap(); /// # } @@ -820,15 +818,14 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let mut n = 0; - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .callback_mut(move |_matching_status| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -851,14 +848,13 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = 
session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -894,13 +890,13 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> +impl<'a, Handler> Wait for MatchingListenerBuilder<'a, Handler> where Handler: IntoHandler<'static, MatchingStatus> + Send, Handler::Handler: Send, { #[zenoh_macros::unstable] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.publisher .session @@ -917,16 +913,17 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> +impl<'a, Handler> IntoFuture for MatchingListenerBuilder<'a, Handler> where Handler: IntoHandler<'static, MatchingStatus> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; #[zenoh_macros::unstable] - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -978,11 +975,11 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = 
matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -1009,12 +1006,12 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); - /// matching_listener.undeclare().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); + /// matching_listener.undeclare().await.unwrap(); /// # } /// ``` #[inline] @@ -1056,8 +1053,8 @@ impl Resolvable for MatchingListenerUndeclaration<'_> { } #[zenoh_macros::unstable] -impl SyncResolve for MatchingListenerUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for MatchingListenerUndeclaration<'_> { + fn wait(mut self) -> ::To { self.subscriber.alive = false; self.subscriber .publisher @@ -1067,11 +1064,12 @@ impl SyncResolve for MatchingListenerUndeclaration<'_> { } #[zenoh_macros::unstable] -impl AsyncResolve for MatchingListenerUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for MatchingListenerUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -1091,7 +1089,7 @@ impl Drop for MatchingListenerInner<'_> { mod tests { use crate::api::{sample::SampleKind, session::SessionDeclarations}; use zenoh_config::Config; - use zenoh_core::SyncResolve; + use zenoh_core::Wait; #[test] fn priority_from() { @@ 
-1125,13 +1123,13 @@ mod tests { const VALUE: &str = "zenoh"; fn sample_kind_integrity_in_publication_with(kind: SampleKind) { - let session = open(Config::default()).res().unwrap(); - let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); - let pub_ = session.declare_publisher(KEY_EXPR).res().unwrap(); + let session = open(Config::default()).wait().unwrap(); + let sub = session.declare_subscriber(KEY_EXPR).wait().unwrap(); + let pub_ = session.declare_publisher(KEY_EXPR).wait().unwrap(); match kind { - SampleKind::Put => pub_.put(VALUE).res().unwrap(), - SampleKind::Delete => pub_.delete().res().unwrap(), + SampleKind::Put => pub_.put(VALUE).wait().unwrap(), + SampleKind::Delete => pub_.delete().wait().unwrap(), } let sample = sub.recv().unwrap(); @@ -1148,18 +1146,17 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { use crate::api::session::open; - use zenoh_core::SyncResolve; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; fn sample_kind_integrity_in_put_builder_with(kind: SampleKind) { - let session = open(Config::default()).res().unwrap(); - let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); + let session = open(Config::default()).wait().unwrap(); + let sub = session.declare_subscriber(KEY_EXPR).wait().unwrap(); match kind { - SampleKind::Put => session.put(KEY_EXPR, VALUE).res().unwrap(), - SampleKind::Delete => session.delete(KEY_EXPR).res().unwrap(), + SampleKind::Put => session.put(KEY_EXPR, VALUE).wait().unwrap(), + SampleKind::Delete => session.delete(KEY_EXPR).wait().unwrap(), } let sample = sub.recv().unwrap(); diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 1cb4078ee6..311402b618 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -24,8 +24,9 @@ use super::{ session::Session, value::Value, }; +use std::future::IntoFuture; use std::{collections::HashMap, future::Ready, time::Duration}; -use zenoh_core::{AsyncResolve, Resolvable, 
SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; @@ -120,14 +121,13 @@ pub(crate) struct QueryState { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let replies = session /// .get("key/expression?value>1") /// .target(QueryTarget::All) /// .consolidation(ConsolidationMode::None) -/// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { @@ -225,13 +225,12 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .get("key/expression") /// .callback(|reply| {println!("Received {:?}", reply.result());}) - /// .res() /// .await /// .unwrap(); /// # } @@ -284,14 +283,13 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .get("key/expression") /// .callback_mut(move |reply| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -313,13 +311,12 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = 
zenoh::open(config::peer()).await.unwrap(); /// let replies = session /// .get("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { @@ -444,12 +441,12 @@ where type To = ZResult; } -impl SyncResolve for GetBuilder<'_, '_, Handler> +impl Wait for GetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.session @@ -472,14 +469,15 @@ where } } -impl AsyncResolve for GetBuilder<'_, '_, Handler> +impl IntoFuture for GetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index dc13468181..c83b4b6081 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -25,6 +25,7 @@ use super::{ Id, }; use crate::net::primitives::Primitives; +use std::future::IntoFuture; use std::{ fmt, future::Ready, @@ -32,7 +33,7 @@ use std::{ sync::Arc, }; use uhlc::Timestamp; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ core::{CongestionControl, EntityId, WireExpr, ZenohId}, network::{response, Mapping, RequestId, Response, ResponseFinal}, @@ -265,17 +266,18 @@ impl Resolvable for ReplySample<'_> { type To = ZResult<()>; } -impl SyncResolve for ReplySample<'_> { - fn res_sync(self) -> ::To { +impl Wait for ReplySample<'_> { + fn wait(self) -> ::To { self.query._reply_sample(self.sample) } } -impl AsyncResolve for ReplySample<'_> { - type Future = Ready; +impl IntoFuture for ReplySample<'_> 
{ + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -383,8 +385,8 @@ impl Resolvable for ReplyBuilder<'_, '_, T> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { - fn res_sync(self) -> ::To { +impl Wait for ReplyBuilder<'_, '_, ReplyBuilderPut> { + fn wait(self) -> ::To { let key_expr = self.key_expr?.into_owned(); let sample = SampleBuilder::put(key_expr, self.kind.payload) .encoding(self.kind.encoding) @@ -398,8 +400,8 @@ impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { } } -impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { - fn res_sync(self) -> ::To { +impl Wait for ReplyBuilder<'_, '_, ReplyBuilderDelete> { + fn wait(self) -> ::To { let key_expr = self.key_expr?.into_owned(); let sample = SampleBuilder::delete(key_expr) .timestamp(self.timestamp) @@ -472,19 +474,21 @@ impl Query { } } -impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { - type Future = Ready; +impl IntoFuture for ReplyBuilder<'_, '_, ReplyBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { - type Future = Ready; +impl IntoFuture for ReplyBuilder<'_, '_, ReplyBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -521,8 +525,8 @@ impl<'a> Resolvable for ReplyErrBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyErrBuilder<'_> { - fn res_sync(self) -> ::To { +impl Wait for ReplyErrBuilder<'_> { + fn wait(self) -> ::To { 
self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -549,11 +553,12 @@ impl SyncResolve for ReplyErrBuilder<'_> { } } -impl<'a> AsyncResolve for ReplyErrBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for ReplyErrBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -589,14 +594,13 @@ impl fmt::Debug for QueryableState { /// # #[tokio::main] /// # async fn main() { /// use futures::prelude::*; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); /// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") -/// .res() /// .await /// .unwrap(); /// } @@ -621,11 +625,11 @@ impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); -/// queryable.undeclare().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); +/// queryable.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] 
@@ -637,8 +641,8 @@ impl Resolvable for QueryableUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for QueryableUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for QueryableUndeclaration<'_> { + fn wait(mut self) -> ::To { self.queryable.alive = false; self.queryable .session @@ -646,11 +650,12 @@ impl SyncResolve for QueryableUndeclaration<'_> { } } -impl<'a> AsyncResolve for QueryableUndeclaration<'a> { - type Future = Ready; +impl<'a> IntoFuture for QueryableUndeclaration<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -668,10 +673,10 @@ impl Drop for CallbackQueryable<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -691,13 +696,12 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .callback(|query| {println!(">> Handling query '{}'", query.selector());}) - /// .res() /// .await /// .unwrap(); /// # } @@ -732,14 +736,13 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] 
/// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .declare_queryable("key/expression") /// .callback_mut(move |query| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -761,13 +764,12 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { @@ -827,19 +829,17 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .with(flume::bounded(32)) -/// .res() /// .await /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); /// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") -/// .res() /// .await /// .unwrap(); /// } @@ -859,11 +859,10 @@ impl<'a, Handler> Queryable<'a, Handler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = 
session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// let queryable_id = queryable.id(); @@ -925,12 +924,12 @@ where type To = ZResult>; } -impl<'a, Handler> SyncResolve for QueryableBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for QueryableBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Query> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session; let (callback, receiver) = self.handler.into_handler(); session @@ -951,14 +950,15 @@ where } } -impl<'a, Handler> AsyncResolve for QueryableBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for QueryableBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Query> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index c4e411dec9..8e7853a411 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -13,10 +13,11 @@ // use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; +use std::future::IntoFuture; use std::time::Duration; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; use zenoh_result::ZResult; use zenoh_task::TerminatableTask; @@ -27,10 +28,9 @@ use zenoh_task::TerminatableTask; /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() /// .await /// .unwrap(); /// 
while let Ok(hello) = receiver.recv_async().await { @@ -53,11 +53,10 @@ impl ScoutBuilder { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -88,12 +87,11 @@ impl ScoutBuilder { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let mut n = 0; /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback_mut(move |_hello| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -115,11 +113,10 @@ impl ScoutBuilder { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { @@ -153,26 +150,27 @@ where type To = ZResult>; } -impl SyncResolve for ScoutBuilder +impl Wait for ScoutBuilder where Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); _scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } -impl AsyncResolve for ScoutBuilder +impl IntoFuture for ScoutBuilder where Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -182,11 +180,10 @@ where /// ``` /// # #[tokio::main] /// # async fn main() { -/// use 
zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) -/// .res() /// .await /// .unwrap(); /// # } @@ -203,11 +200,10 @@ impl ScoutInner { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) - /// .res() /// .await /// .unwrap(); /// scout.stop(); @@ -239,11 +235,10 @@ impl fmt::Debug for ScoutInner { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) -/// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { @@ -273,11 +268,10 @@ impl Scout { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// let _router = scout.recv_async().await; @@ -350,11 +344,10 @@ fn _scout( /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 01fc345c3b..dea322419c 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -32,6 +32,7 @@ use super::{ Id, }; use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; +use 
std::future::IntoFuture; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -49,9 +50,7 @@ use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::{unwrap_or_default, Config, Notifier}; -use zenoh_core::{ - zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, SyncResolve, -}; +use zenoh_core::{zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, Wait}; #[cfg(feature = "unstable")] use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ @@ -78,7 +77,6 @@ use zenoh_result::ZResult; #[cfg(all(feature = "unstable", feature = "shared-memory"))] use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; -use zenoh_util::core::AsyncResolve; #[cfg(feature = "unstable")] use super::{ @@ -452,11 +450,10 @@ impl Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -486,10 +483,10 @@ impl Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = Session::leak(zenoh::open(config::peer()).res().await.unwrap()); - /// let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); + /// let session = Session::leak(zenoh::open(config::peer()).await.unwrap()); + /// let subscriber = session.declare_subscriber("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); @@ -504,7 +501,7 @@ impl Session { /// Returns the 
identifier of the current session. `zid()` is a convenient shortcut. /// See [`Session::info()`](`Session::info()`) and [`SessionInfo::zid()`](`SessionInfo::zid()`) for more details. pub fn zid(&self) -> ZenohId { - self.info().zid().res_sync() + self.info().zid().wait() } pub fn hlc(&self) -> Option<&HLC> { @@ -520,10 +517,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// session.close().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// session.close().await.unwrap(); /// # } /// ``` pub fn close(mut self) -> impl Resolve> { @@ -563,9 +560,9 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let peers = session.config().get("connect/endpoints").unwrap(); /// # } /// ``` @@ -574,9 +571,9 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let _ = session.config().insert_json5("connect/endpoints", r#"["tcp/127.0.0.1/7447"]"#); /// # } /// ``` @@ -635,10 +632,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); /// # } /// ``` 
pub fn declare_keyexpr<'a, 'b: 'a, TryIntoKeyExpr>( @@ -661,7 +658,7 @@ impl Session { ResolveClosure::new(move || { let key_expr: KeyExpr = key_expr?; let prefix_len = key_expr.len() as u32; - let expr_id = self.declare_prefix(key_expr.as_str()).res_sync(); + let expr_id = self.declare_prefix(key_expr.as_str()).wait(); let key_expr = match key_expr.0 { KeyExprInner::Borrowed(key_expr) | KeyExprInner::BorrowedWire { key_expr, .. } => { KeyExpr(KeyExprInner::BorrowedWire { @@ -697,13 +694,12 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// session /// .put("key/expression", "payload") /// .encoding(Encoding::TEXT_PLAIN) - /// .res() /// .await /// .unwrap(); /// # } @@ -743,10 +739,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// session.delete("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// session.delete("key/expression").await.unwrap(); /// # } /// ``` #[inline] @@ -781,10 +777,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = session.get("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let replies = session.get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// println!(">> Received {:?}", reply.result()); /// } @@ -858,7 +854,6 @@ impl Session { aggregated_subscribers, aggregated_publishers, ) - .res_async() .await; 
session.owns_runtime = true; runtime.start().await?; @@ -1091,14 +1086,14 @@ impl Session { // match key_expr.as_str().find('*') { // Some(0) => key_expr.to_wire(self), // Some(pos) => { - // let expr_id = self.declare_prefix(&key_expr.as_str()[..pos]).res_sync(); + // let expr_id = self.declare_prefix(&key_expr.as_str()[..pos]).wait(); // WireExpr { // scope: expr_id, // suffix: std::borrow::Cow::Borrowed(&key_expr.as_str()[pos..]), // } // } // None => { - // let expr_id = self.declare_prefix(key_expr.as_str()).res_sync(); + // let expr_id = self.declare_prefix(key_expr.as_str()).wait(); // WireExpr { // scope: expr_id, // suffix: std::borrow::Cow::Borrowed(""), @@ -1840,11 +1835,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -1882,11 +1876,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -1894,7 +1887,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// query.reply( /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).res().await.unwrap(); + /// ).await.unwrap(); /// } /// }).await; /// # } @@ -1926,14 +1919,13 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ``` 
/// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( @@ -1960,13 +1952,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -2424,7 +2415,7 @@ impl Primitives for Session { impl Drop for Session { fn drop(&mut self) { if self.alive { - let _ = self.clone().close().res_sync(); + let _ = self.clone().close().wait(); } } } @@ -2448,11 +2439,10 @@ impl fmt::Debug for Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") -/// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2473,11 +2463,10 @@ pub trait SessionDeclarations<'s, 'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = 
zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2506,11 +2495,10 @@ pub trait SessionDeclarations<'s, 'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2518,7 +2506,7 @@ pub trait SessionDeclarations<'s, 'a> { /// query.reply( /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).res().await.unwrap(); + /// ).await.unwrap(); /// } /// }).await; /// # } @@ -2541,14 +2529,13 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( @@ -2565,13 +2552,12 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -2584,9 
+2570,9 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let info = session.info(); /// # } /// ``` @@ -2639,9 +2625,9 @@ impl crate::net::primitives::EPrimitives for Session { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// # } /// ``` /// @@ -2649,13 +2635,13 @@ impl crate::net::primitives::EPrimitives for Session { /// # #[tokio::main] /// # async fn main() { /// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let mut config = config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); /// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); /// -/// let session = zenoh::open(config).res().await.unwrap(); +/// let session = zenoh::open(config).await.unwrap(); /// # } /// ``` pub fn open(config: TryIntoConfig) -> OpenBuilder @@ -2676,9 +2662,9 @@ where /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -2712,12 +2698,12 @@ where type To = ZResult; } -impl SyncResolve for OpenBuilder +impl Wait for OpenBuilder where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - fn 
res_sync(self) -> ::To { + fn wait(self) -> ::To { let config: crate::config::Config = self .config .try_into() @@ -2727,19 +2713,20 @@ where #[cfg(all(feature = "unstable", feature = "shared-memory"))] self.shm_clients, ) - .res_sync() + .wait() } } -impl AsyncResolve for OpenBuilder +impl IntoFuture for OpenBuilder where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -2786,22 +2773,23 @@ impl Resolvable for InitBuilder { } #[zenoh_macros::unstable] -impl SyncResolve for InitBuilder { - fn res_sync(self) -> ::To { +impl Wait for InitBuilder { + fn wait(self) -> ::To { Ok(Session::init( self.runtime, self.aggregated_subscribers, self.aggregated_publishers, ) - .res_sync()) + .wait()) } } #[zenoh_macros::unstable] -impl AsyncResolve for InitBuilder { - type Future = Ready; +impl IntoFuture for InitBuilder { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 4ac035d736..0c4e21b547 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -19,13 +19,14 @@ use super::{ session::{SessionRef, Undeclarable}, Id, }; +use std::future::IntoFuture; use std::{ fmt, future::Ready, ops::{Deref, DerefMut}, sync::Arc, }; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; use zenoh_result::ZResult; @@ -63,13 +64,12 @@ impl fmt::Debug for SubscriberState { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use 
zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) }) -/// .res() /// .await /// .unwrap(); /// # } @@ -91,17 +91,16 @@ impl<'a> SubscriberInner<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// # fn data_handler(_sample: Sample) { }; /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(data_handler) - /// .res() /// .await /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); + /// subscriber.undeclare().await.unwrap(); /// # } /// ``` #[inline] @@ -122,15 +121,14 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .res() /// .await /// .unwrap(); -/// subscriber.undeclare().res().await.unwrap(); +/// subscriber.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -142,8 +140,8 @@ impl Resolvable for SubscriberUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for SubscriberUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for SubscriberUndeclaration<'_> { + fn wait(mut self) -> ::To { 
self.subscriber.alive = false; self.subscriber .session @@ -151,11 +149,12 @@ impl SyncResolve for SubscriberUndeclaration<'_> { } } -impl AsyncResolve for SubscriberUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for SubscriberUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -173,13 +172,12 @@ impl Drop for SubscriberInner<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .res() /// .await /// .unwrap(); /// # } @@ -220,13 +218,12 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -263,14 +260,13 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ 
-292,13 +288,12 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -369,12 +364,12 @@ where type To = ZResult>; } -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; let (callback, receiver) = self.handler.into_handler(); @@ -399,15 +394,16 @@ where } } -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -424,13 +420,12 @@ where /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) -/// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -452,11 +447,10 @@ impl<'a, Handler> Subscriber<'a, 
Handler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// let subscriber_id = subscriber.id(); @@ -498,14 +492,13 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); + /// subscriber.undeclare().await.unwrap(); /// # } /// ``` #[inline] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3c011e2439..6f679407c8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -32,13 +32,13 @@ //! ### Publishing Data //! The example below shows how to produce a value for a key expression. //! ``` -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).res().await.unwrap(); -//! session.put("key/expression", "value").res().await.unwrap(); -//! session.close().res().await.unwrap(); +//! let session = zenoh::open(config::default()).await.unwrap(); +//! session.put("key/expression", "value").await.unwrap(); +//! session.close().await.unwrap(); //! } //! ``` //! @@ -46,12 +46,12 @@ //! The example below shows how to consume values for a key expresison. //! ```no_run //! use futures::prelude::*; -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! //! #[tokio::main] //! async fn main() { -//! 
let session = zenoh::open(config::default()).res().await.unwrap(); -//! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); +//! let session = zenoh::open(config::default()).await.unwrap(); +//! let subscriber = session.declare_subscriber("key/expression").await.unwrap(); //! while let Ok(sample) = subscriber.recv_async().await { //! println!("Received: {:?}", sample); //! }; @@ -63,12 +63,12 @@ //! resources whose key match the given *key expression*. //! ``` //! use futures::prelude::*; -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).res().await.unwrap(); -//! let replies = session.get("key/expression").res().await.unwrap(); +//! let session = zenoh::open(config::default()).await.unwrap(); +//! let replies = session.get("key/expression").await.unwrap(); //! while let Ok(reply) = replies.recv_async().await { //! println!(">> Received {:?}", reply.result()); //! } @@ -117,10 +117,13 @@ pub mod prelude; /// Zenoh core types pub mod core { + #[allow(deprecated)] pub use zenoh_core::AsyncResolve; pub use zenoh_core::Resolvable; pub use zenoh_core::Resolve; + #[allow(deprecated)] pub use zenoh_core::SyncResolve; + pub use zenoh_core::Wait; /// A zenoh error. pub use zenoh_result::Error; /// A zenoh result. 
diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index ea084c453b..9ea54b8d88 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -31,7 +31,7 @@ use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; -use zenoh_core::SyncResolve; +use zenoh_core::Wait; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] @@ -630,7 +630,7 @@ fn local_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, payload) .encoding(Encoding::APPLICATION_JSON) - .res_sync() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -662,7 +662,7 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query.reply(reply_key, metrics).res() { + if let Err(e) = query.reply(reply_key, metrics).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -679,7 +679,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) - .res() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -697,7 +697,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) - .res() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -719,7 +719,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) - .res_sync() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -743,7 +743,7 @@ fn queryables_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(key, payload) 
.encoding(Encoding::APPLICATION_JSON) - .res_sync() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -768,7 +768,7 @@ fn plugins_data(context: &AdminContext, query: Query) { let status = serde_json::to_value(status).unwrap(); match ZBytes::try_from(status) { Ok(zbuf) => { - if let Err(e) = query.reply(key, zbuf).res_sync() { + if let Err(e) = query.reply(key, zbuf).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -793,7 +793,7 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(key_expr, plugin.path()).res() { + if let Err(e) = query.reply(key_expr, plugin.path()).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -817,7 +817,7 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(response.key) { match ZBytes::try_from(response.value) { Ok(zbuf) => { - if let Err(e) = query.reply(key_expr, zbuf).res_sync() { + if let Err(e) = query.reply(key_expr, zbuf).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } }, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 17286ddeea..ac466ae50b 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -18,22 +18,11 @@ //! almost always want to import its entire contents, but unlike the standard //! library's prelude you'll have to do so manually. //! -//! There are three variants of the prelude: full, sync and async. The sync one excludes the [`AsyncResolve`](crate::core::AsyncResolve) trait and the async one excludes the [`SyncResolve`](crate::core::SyncResolve) trait. -//! When specific sync or async prelude is included, the `res()` function of buildes works synchronously or asynchronously, respectively. -//! -//! 
If root prelude is included, the `res_sync()` or `res_async()` function of builders should be called explicitly. -//! //! Examples: //! //! ``` //!use zenoh::prelude::*; //! ``` -//! ``` -//!use zenoh::prelude::sync::*; -//! ``` -//! ``` -//!use zenoh::prelude::r#async::*; -//! ``` // Reexport API in flat namespace pub(crate) mod flat { @@ -81,20 +70,27 @@ pub(crate) mod mods { pub use crate::value; } +#[allow(deprecated)] pub use crate::core::AsyncResolve; +#[allow(deprecated)] pub use crate::core::SyncResolve; +pub use crate::core::Wait; pub use flat::*; pub use mods::*; /// Prelude to import when using Zenoh's sync API. +#[deprecated = "use `zenoh::prelude` instead"] pub mod sync { pub use super::flat::*; pub use super::mods::*; + #[allow(deprecated)] pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. +#[deprecated = "use `zenoh::prelude` instead"] pub mod r#async { pub use super::flat::*; pub use super::mods::*; + #[allow(deprecated)] pub use crate::core::AsyncResolve; } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index dd1aa1271d..5f3c482581 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -16,7 +16,7 @@ mod test { use std::sync::{Arc, Mutex}; use std::time::Duration; use tokio::runtime::Handle; - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; use zenoh_core::{zlock, ztimeout}; const TIMEOUT: Duration = Duration::from_secs(60); @@ -46,22 +46,22 @@ mod test { async fn close_router_session(s: Session) { println!("Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); + ztimeout!(s.close()).unwrap(); } async fn get_client_sessions() -> (Session, Session) { println!("Opening client sessions"); let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s02 = 
ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } async fn close_sessions(s01: Session, s02: Session) { println!("Closing client sessions"); - ztimeout!(s01.close().res_async()).unwrap(); - ztimeout!(s02.close().res_async()).unwrap(); + ztimeout!(s01.close()).unwrap(); + ztimeout!(s02.close()).unwrap(); } async fn test_pub_sub_deny() { @@ -82,15 +82,11 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await - .unwrap(); + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); let subscriber = sub_session @@ -99,15 +95,14 @@ mod test { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.payload().deserialize::().unwrap(); }) - .res_async() .await .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; @@ -132,28 +127,28 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = 
ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .res_async()) - .unwrap(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + ztimeout!(publisher.put(VALUE)).unwrap(); tokio::time::sleep(SLEEP).await; assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; @@ -193,28 +188,28 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .res_async()) - .unwrap(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = 
sample.payload().deserialize::().unwrap(); + })) + .unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + ztimeout!(publisher.put(VALUE)).unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; @@ -253,28 +248,28 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .res_async()) - .unwrap(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + ztimeout!(publisher.put(VALUE)).unwrap(); tokio::time::sleep(SLEEP).await; assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; @@ -298,7 +293,7 @@ mod test { .unwrap(); println!("Opening router session"); - let session = 
ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -309,15 +304,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -329,7 +323,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; @@ -353,7 +347,7 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -364,15 +358,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -384,7 +377,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_eq!(received_value, VALUE); - 
ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; @@ -423,7 +416,7 @@ mod test { println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -434,15 +427,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -454,7 +446,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; @@ -492,7 +484,7 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -503,15 +495,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let 
recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -523,7 +514,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index b98a656089..836845a645 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -15,8 +15,8 @@ #[test] fn attachment_pubsub() { use zenoh::bytes::ZBytes; - use zenoh::prelude::sync::*; - let zenoh = zenoh::open(Config::default()).res().unwrap(); + use zenoh::prelude::*; + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { @@ -28,10 +28,10 @@ fn attachment_pubsub() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) - .res() + .wait() .unwrap(); - let publisher = zenoh.declare_publisher("test/attachment").res().unwrap(); + let publisher = zenoh.declare_publisher("test/attachment").wait().unwrap(); for i in 0..10 { let mut backer = [( [0; std::mem::size_of::()], @@ -44,12 +44,12 @@ fn attachment_pubsub() { zenoh .put("test/attachment", "put") .attachment(ZBytes::from_iter(backer.iter())) - .res() + .wait() .unwrap(); publisher .put("publisher") .attachment(ZBytes::from_iter(backer.iter())) - .res() + .wait() .unwrap(); } } @@ -57,8 +57,8 @@ fn attachment_pubsub() { #[cfg(feature = "unstable")] #[test] fn attachment_queries() { - use zenoh::prelude::sync::*; - let zenoh = zenoh::open(Config::default()).res().unwrap(); + use zenoh::prelude::*; + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_queryable("test/attachment") .callback(|query| { @@ -90,10 +90,10 @@ fn attachment_queries() { )>() .map(|(k, _)| (k, 
k)), )) - .res() + .wait() .unwrap(); }) - .res() + .wait() .unwrap(); for i in 0..10 { let mut backer = [( @@ -108,7 +108,7 @@ fn attachment_queries() { .get("test/attachment") .payload("query") .attachment(ZBytes::from_iter(backer.iter())) - .res() + .wait() .unwrap(); while let Ok(reply) = get.recv() { let response = reply.result().unwrap(); diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index d99017ff43..67a1c9c093 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::sync::*; +use zenoh::prelude::*; #[test] fn retry_config_overriding() { @@ -164,7 +164,7 @@ fn listen_no_retry() { .unwrap(); config.insert_json5("listen/timeout_ms", "0").unwrap(); - zenoh::open(config).res().unwrap(); + zenoh::open(config).wait().unwrap(); } #[test] @@ -177,5 +177,5 @@ fn listen_with_retry() { config.insert_json5("listen/timeout_ms", "1000").unwrap(); - zenoh::open(config).res().unwrap(); + zenoh::open(config).wait().unwrap(); } diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index cbb38e90fc..99ca6055da 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -13,7 +13,7 @@ // use std::time::Duration; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(10); @@ -29,25 +29,24 @@ async fn open_session(listen: &[&str], connect: &[&str]) -> Session { .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening session"); - ztimeout!(zenoh::open(config).res_async()).unwrap() + ztimeout!(zenoh::open(config)).unwrap() } async fn close_session(session: Session) { println!("[ ][01d] Closing session"); - ztimeout!(session.close().res_async()).unwrap(); + ztimeout!(session.close()).unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_events() { let session = 
open_session(&["tcp/127.0.0.1:18447"], &[]).await; let zid = session.zid(); - let sub1 = ztimeout!(session - .declare_subscriber(format!("@/session/{zid}/transport/unicast/*")) - .res()) - .unwrap(); - let sub2 = ztimeout!(session - .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res()) + let sub1 = + ztimeout!(session.declare_subscriber(format!("@/session/{zid}/transport/unicast/*"))) + .unwrap(); + let sub2 = ztimeout!( + session.declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) + ) .unwrap(); let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await; @@ -65,23 +64,21 @@ async fn zenoh_events() { assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); - let replies: Vec = ztimeout!(session - .get(format!("@/session/{zid}/transport/unicast/*")) - .res_async()) - .unwrap() - .into_iter() - .collect(); + let replies: Vec = + ztimeout!(session.get(format!("@/session/{zid}/transport/unicast/*"))) + .unwrap() + .into_iter() + .collect(); assert!(replies.len() == 1); assert!(replies[0].result().is_ok()); let key_expr = replies[0].result().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - let replies: Vec = ztimeout!(session - .get(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res_async()) - .unwrap() - .into_iter() - .collect(); + let replies: Vec = + ztimeout!(session.get(format!("@/session/{zid}/transport/unicast/*/link/*"))) + .unwrap() + .into_iter() + .collect(); assert!(replies.len() == 1); assert!(replies[0].result().is_ok()); let key_expr = replies[0].result().unwrap().key_expr().as_str(); @@ -101,7 +98,7 @@ async fn zenoh_events() { assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); - 
ztimeout!(sub2.undeclare().res()).unwrap(); - ztimeout!(sub1.undeclare().res()).unwrap(); + ztimeout!(sub2.undeclare()).unwrap(); + ztimeout!(sub1.undeclare()).unwrap(); close_session(session).await; } diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index b83fead54b..0862f9ee89 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -12,20 +12,20 @@ // ZettaScale Zenoh Team, // use std::{thread, time::Duration}; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; #[test] fn pubsub_with_ringbuffer() { - let zenoh = zenoh::open(Config::default()).res().unwrap(); + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let sub = zenoh .declare_subscriber("test/ringbuffer") .with(RingChannel::new(3)) - .res() + .wait() .unwrap(); for i in 0..10 { zenoh .put("test/ringbuffer", format!("put{i}")) - .res() + .wait() .unwrap(); } // Should only receive the last three samples ("put7", "put8", "put9") @@ -45,22 +45,22 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - let zenoh = zenoh::open(Config::default()).res().unwrap(); + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let queryable = zenoh .declare_queryable("test/ringbuffer_query") .with(RingChannel::new(1)) - .res() + .wait() .unwrap(); let _reply1 = zenoh .get("test/ringbuffer_query") .payload("query1") - .res() + .wait() .unwrap(); let _reply2 = zenoh .get("test/ringbuffer_query") .payload("query2") - .res() + .wait() .unwrap(); let query = queryable.recv().unwrap(); diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 7a3a9c80d6..f6e876d92e 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -13,7 +13,7 @@ // use std::sync::{Arc, Mutex}; use zenoh::internal::zlock; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; struct IntervalCounter { first_tick: bool, @@ -89,7 +89,7 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .multicast .set_enabled(Some(false)) .unwrap(); - let zenoh_sub = 
zenoh::open(config_sub).res().unwrap(); + let zenoh_sub = zenoh::open(config_sub).wait().unwrap(); let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); let counter_r100_clone = counter_r100.clone(); @@ -110,7 +110,7 @@ fn downsampling_by_keyexpr_impl(egress: bool) { zlock!(counter_r50).tick(); } }) - .res() + .wait() .unwrap(); // declare publisher @@ -126,29 +126,29 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .multicast .set_enabled(Some(false)) .unwrap(); - let zenoh_pub = zenoh::open(config_pub).res().unwrap(); + let zenoh_pub = zenoh::open(config_pub).wait().unwrap(); let publisher_r100 = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/r100") - .res() + .wait() .unwrap(); let publisher_r50 = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/r50") - .res() + .wait() .unwrap(); let publisher_all = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/all") - .res() + .wait() .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio let interval = std::time::Duration::from_millis(2); let messages_count = 1000; for i in 0..messages_count { - publisher_r100.put(format!("message {}", i)).res().unwrap(); - publisher_r50.put(format!("message {}", i)).res().unwrap(); - publisher_all.put(format!("message {}", i)).res().unwrap(); + publisher_r100.put(format!("message {}", i)).wait().unwrap(); + publisher_r50.put(format!("message {}", i)).wait().unwrap(); + publisher_all.put(format!("message {}", i)).wait().unwrap(); std::thread::sleep(interval); } @@ -205,7 +205,7 @@ fn downsampling_by_interface_impl(egress: bool) { if !egress { config_sub.insert_json5("downsampling", &ds_cfg).unwrap(); }; - let zenoh_sub = zenoh::open(config_sub).res().unwrap(); + let zenoh_sub = zenoh::open(config_sub).wait().unwrap(); let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); let counter_r100_clone = counter_r100.clone(); @@ -222,7 +222,7 @@ fn downsampling_by_interface_impl(egress: bool) { zlock!(counter_r100).tick(); } }) - .res() + .wait() 
.unwrap(); // declare publisher @@ -233,23 +233,23 @@ fn downsampling_by_interface_impl(egress: bool) { if egress { config_pub.insert_json5("downsampling", &ds_cfg).unwrap(); } - let zenoh_pub = zenoh::open(config_pub).res().unwrap(); + let zenoh_pub = zenoh::open(config_pub).wait().unwrap(); let publisher_r100 = zenoh_pub .declare_publisher("test/downsamples_by_interface/r100") - .res() + .wait() .unwrap(); let publisher_all = zenoh_pub .declare_publisher("test/downsamples_by_interface/all") - .res() + .wait() .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio let interval = std::time::Duration::from_millis(2); let messages_count = 1000; for i in 0..messages_count { - publisher_r100.put(format!("message {}", i)).res().unwrap(); - publisher_all.put(format!("message {}", i)).res().unwrap(); + publisher_r100.put(format!("message {}", i)).wait().unwrap(); + publisher_all.put(format!("message {}", i)).wait().unwrap(); std::thread::sleep(interval); } @@ -295,5 +295,5 @@ fn downsampling_config_error_wrong_strategy() { ) .unwrap(); - zenoh::open(config).res().unwrap(); + zenoh::open(config).wait().unwrap(); } diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 1cd0830ea2..0456361419 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -23,33 +23,24 @@ async fn zenoh_liveliness() { .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) .unwrap(); c1.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(c1)).unwrap(); let mut c2 = config::peer(); c2.connect .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) .unwrap(); c2.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(c2)).unwrap(); let sub = ztimeout!(session2 .liveliness() - .declare_subscriber("zenoh_liveliness_test") - .res_async()) + 
.declare_subscriber("zenoh_liveliness_test")) .unwrap(); - let token = ztimeout!(session1 - .liveliness() - .declare_token("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let token = ztimeout!(session1.liveliness().declare_token("zenoh_liveliness_test")).unwrap(); tokio::time::sleep(SLEEP).await; - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let replies = ztimeout!(session2.liveliness().get("zenoh_liveliness_test")).unwrap(); let sample: Sample = ztimeout!(replies.recv_async()) .unwrap() .into_result() @@ -67,11 +58,7 @@ async fn zenoh_liveliness() { tokio::time::sleep(SLEEP).await; - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let replies = ztimeout!(session2.liveliness().get("zenoh_liveliness_test")).unwrap(); assert!(ztimeout!(replies.recv_async()).is_err()); assert!(replies.try_recv().is_err()); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 4e838f98a1..1473d7f6fc 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -35,8 +35,8 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { }; let config2 = zenoh::config::client([Locator::from_str(locator).unwrap()]); - let session1 = ztimeout!(zenoh::open(config1).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(config2).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config1)).unwrap(); + let session2 = ztimeout!(zenoh::open(config2)).unwrap(); (session1, session2) } @@ -47,54 +47,47 @@ async fn zenoh_matching_status_any() -> ZResult<()> { let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_any_test") - .allowed_destination(Locality::Any) - .res_async()) + .allowed_destination(Locality::Any)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); 
let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_any_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_any_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + 
ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) } @@ -102,60 +95,53 @@ async fn zenoh_matching_status_any() -> ZResult<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(peer())).unwrap(); - let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_remote_test") - .allowed_destination(Locality::Remote) - .res_async()) + .allowed_destination(Locality::Remote)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_remote_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = 
ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_remote_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) @@ -164,60 +150,53 @@ async fn zenoh_matching_status_remote() -> ZResult<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session1 = 
ztimeout!(zenoh::open(config::peer())).unwrap(); - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_local_test") - .allowed_destination(Locality::SessionLocal) - .res_async()) + .allowed_destination(Locality::SessionLocal)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_local_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - 
.declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_local_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs index fac5d37367..fecf10a608 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/payload.rs @@ -15,7 +15,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_payload_single_buf() { - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; // create an SHM backend... 
let backend = PosixSharedMemoryProviderBackend::builder() diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index b70d01ec79..6f44b2d0be 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,40 +13,38 @@ // use std::time::Duration; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn pubsub() { - let session1 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("test/qos") .priority(Priority::DataHigh) - .congestion_control(CongestionControl::Drop) - .res()) + .congestion_control(CongestionControl::Drop)) .unwrap(); let publisher2 = ztimeout!(session1 .declare_publisher("test/qos") .priority(Priority::DataLow) - .congestion_control(CongestionControl::Block) - .res()) + .congestion_control(CongestionControl::Block)) .unwrap(); - let subscriber = ztimeout!(session2.declare_subscriber("test/qos").res()).unwrap(); + let subscriber = ztimeout!(session2.declare_subscriber("test/qos")).unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher1.put("qos").res_async()).unwrap(); + ztimeout!(publisher1.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); assert_eq!(sample.priority(), Priority::DataHigh); assert_eq!(sample.congestion_control(), CongestionControl::Drop); - ztimeout!(publisher2.put("qos").res_async()).unwrap(); + ztimeout!(publisher2.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); assert_eq!(sample.priority(), Priority::DataLow); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs 
index dd6e7fd715..3c9f2723a6 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -18,7 +18,7 @@ use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::core::Result; use zenoh::internal::{bail, ztimeout}; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; @@ -47,7 +47,7 @@ impl Task { match self { // The Sub task checks if the incoming message matches the expected size until it receives enough counts. Self::Sub(ke, expected_size) => { - let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; + let sub = ztimeout!(session.declare_subscriber(ke))?; let mut counter = 0; loop { tokio::select! { @@ -77,10 +77,11 @@ impl Task { _ = token.cancelled() => break, // WARN: this won't yield after a timeout since the put is a blocking call - res = tokio::time::timeout(std::time::Duration::from_secs(1), session + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {session .put(ke, vec![0u8; *payload_size]) .congestion_control(CongestionControl::Block) - .res()) => { + .await + }) => { let _ = res?; } } @@ -94,7 +95,7 @@ impl Task { while counter < MSG_COUNT { tokio::select! { _ = token.cancelled() => break, - replies = session.get(ke).timeout(Duration::from_secs(10)).res() => { + replies = async { session.get(ke).timeout(Duration::from_secs(10)).await } => { let replies = replies?; while let Ok(reply) = replies.recv_async().await { match reply.result() { @@ -124,14 +125,14 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. Self::Queryable(ke, payload_size) => { - let queryable = ztimeout!(session.declare_queryable(ke).res_async())?; + let queryable = ztimeout!(session.declare_queryable(ke))?; let payload = vec![0u8; *payload_size]; loop { tokio::select! 
{ _ = token.cancelled() => break, query = queryable.recv_async() => { - ztimeout!(query?.reply(ke.to_owned(), payload.clone()).res_async())?; + ztimeout!(query?.reply(ke.to_owned(), payload.clone()))?; }, } } @@ -276,7 +277,7 @@ impl Recipe { // In case of client can't connect to some peers/routers loop { - if let Ok(session) = ztimeout!(zenoh::open(config.clone()).res_async()) { + if let Ok(session) = ztimeout!(zenoh::open(config.clone())) { break session.into_arc(); } else { tokio::time::sleep(Duration::from_secs(1)).await; @@ -312,7 +313,7 @@ impl Recipe { // node_task_tracker.wait().await; // Close the session once all the task assoicated with the node are done. - ztimeout!(Arc::try_unwrap(session).unwrap().close().res_async())?; + ztimeout!(Arc::try_unwrap(session).unwrap().close())?; println!("Node: {} is closed.", &node.name); Result::Ok(()) diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 91d9b6d95b..b52dbb90b8 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,7 +15,8 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; +#[cfg(feature = "unstable")] use zenoh::runtime::Runtime; const TIMEOUT: Duration = Duration::from_secs(60); @@ -33,7 +34,7 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.connect.endpoints = endpoints @@ -42,7 +43,7 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); - let peer02 = 
ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } @@ -53,22 +54,22 @@ async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, config.listen.endpoints = vec![endpoint01.parse().unwrap()]; config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.listen.endpoints = vec![endpoint02.parse().unwrap()]; config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } async fn close_session(peer01: Session, peer02: Session) { println!("[ ][01d] Closing peer01 session"); - ztimeout!(peer01.close().res_async()).unwrap(); + ztimeout!(peer01.close()).unwrap(); println!("[ ][02d] Closing peer02 session"); - ztimeout!(peer02.close().res_async()).unwrap(); + ztimeout!(peer02.close()).unwrap(); } async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { @@ -85,13 +86,10 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re // Subscribe to data println!("[PS][01b] Subscribing on peer01 session"); let c_msgs = msgs.clone(); - let sub = ztimeout!(peer01 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub = ztimeout!(peer01.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Wait for the declaration to propagate @@ -102,8 +100,7 @@ async fn test_session_pubsub(peer01: 
&Session, peer02: &Session, reliability: Re for _ in 0..msg_count { ztimeout!(peer02 .put(key_expr, vec![0u8; size]) - .congestion_control(CongestionControl::Block) - .res_async()) + .congestion_control(CongestionControl::Block)) .unwrap(); } @@ -123,7 +120,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re tokio::time::sleep(SLEEP).await; println!("[PS][03b] Unsubscribing on peer01 session"); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -144,43 +141,36 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re // Queryable to data println!("[QR][01c] Queryable on peer01 session"); let c_msgs = msgs.clone(); - let qbl = ztimeout!(peer01 - .declare_queryable(key_expr) - .callback(move |query| { - c_msgs.fetch_add(1, Ordering::Relaxed); - match query.parameters().as_str() { - "ok_put" => { - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query - .reply( - KeyExpr::try_from(key_expr).unwrap(), - vec![0u8; size].to_vec() - ) - .res_async()) - .unwrap() - }) - }); - } - "ok_del" => { - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query.reply_del(key_expr).res_async()).unwrap() - }) - }); - } - "err" => { - let rep = Value::from(vec![0u8; size]); - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query.reply_err(rep).res_async()).unwrap() - }) - }); - } - _ => panic!("Unknown query parameter"), + let qbl = ztimeout!(peer01.declare_queryable(key_expr).callback(move |query| { + c_msgs.fetch_add(1, Ordering::Relaxed); + match query.parameters().as_str() { + "ok_put" => { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + ztimeout!(query.reply( + KeyExpr::try_from(key_expr).unwrap(), + vec![0u8; 
size].to_vec() + )) + .unwrap() + }) + }); } - }) - .res_async()) + "ok_del" => { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(query.reply_del(key_expr)).unwrap() }) + }); + } + "err" => { + let rep = Value::from(vec![0u8; size]); + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(query.reply_err(rep)).unwrap() }) + }); + } + _ => panic!("Unknown query parameter"), + } + })) .unwrap(); // Wait for the declaration to propagate @@ -191,7 +181,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let mut cnt = 0; for _ in 0..msg_count { let selector = format!("{}?ok_put", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Put); @@ -209,7 +199,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let mut cnt = 0; for _ in 0..msg_count { let selector = format!("{}?ok_del", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Delete); @@ -227,7 +217,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let mut cnt = 0; for _ in 0..msg_count { let selector = format!("{}?err", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let e = s.result().unwrap_err(); assert_eq!(e.payload().len(), size); @@ -239,7 +229,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re assert_eq!(cnt, msg_count); println!("[PS][03c] Unqueryable on peer01 session"); - 
ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -264,6 +254,7 @@ async fn zenoh_session_multicast() { close_session(peer01, peer02).await; } +#[cfg(feature = "unstable")] async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { // Open the sessions let mut config = config::peer(); @@ -287,15 +278,16 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) (r1, r2) } +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_2sessions_1runtime_init() { let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await; println!("[RI][02a] Creating peer01 session from runtime 1"); - let peer01 = zenoh::session::init(r1.clone()).res_async().await.unwrap(); + let peer01 = zenoh::session::init(r1.clone()).await.unwrap(); println!("[RI][02b] Creating peer02 session from runtime 2"); - let peer02 = zenoh::session::init(r2.clone()).res_async().await.unwrap(); + let peer02 = zenoh::session::init(r2.clone()).await.unwrap(); println!("[RI][02c] Creating peer01a session from runtime 1"); - let peer01a = zenoh::session::init(r1.clone()).res_async().await.unwrap(); + let peer01a = zenoh::session::init(r1.clone()).await.unwrap(); println!("[RI][03c] Closing peer01a session"); std::mem::drop(peer01a); test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index a7bc481e27..ec77890c1e 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -17,7 +17,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; use zenoh::internal::ztimeout; - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -35,7 +35,7 @@ mod tests { config.scouting.multicast.set_enabled(Some(false)).unwrap(); 
config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.connect.endpoints = endpoints @@ -45,7 +45,7 @@ mod tests { config.scouting.multicast.set_enabled(Some(false)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } @@ -57,23 +57,23 @@ mod tests { config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.listen.endpoints = vec![endpoint02.parse().unwrap()]; config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } async fn close_session(peer01: Session, peer02: Session) { println!("[ ][01d] Closing peer02 session"); - ztimeout!(peer01.close().res_async()).unwrap(); + ztimeout!(peer01.close()).unwrap(); println!("[ ][02d] Closing peer02 session"); - ztimeout!(peer02.close().res_async()).unwrap(); + ztimeout!(peer02.close()).unwrap(); } async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { @@ -96,8 +96,7 @@ mod tests { .callback(move |sample| { assert_eq!(sample.payload().len(), size); c_msgs.fetch_add(1, Ordering::Relaxed); - }) - 
.res_async()) + })) .unwrap(); // Wait for the declaration to propagate @@ -135,8 +134,7 @@ mod tests { // Publish this message ztimeout!(peer02 .put(&key_expr, sbuf) - .congestion_control(CongestionControl::Block) - .res_async()) + .congestion_control(CongestionControl::Block)) .unwrap(); println!("{c} putted"); } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index fcddcf3b3e..c5be555a00 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -29,14 +29,14 @@ async fn open_p2p_sessions() -> (Session, Session, Session) { config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening s01 session"); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.listen.endpoints = vec!["tcp/127.0.0.1:27448".parse().unwrap()]; config.connect.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening s02 session"); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.connect.endpoints = vec![ @@ -45,7 +45,7 @@ async fn open_p2p_sessions() -> (Session, Session, Session) { ]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][03a] Opening s03 session"); - let s03 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s03 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02, s03) } @@ -57,38 +57,38 @@ async fn open_router_session() -> Session { config.listen.endpoints 
= vec!["tcp/127.0.0.1:37447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][00a] Opening router session"); - ztimeout!(zenoh::open(config).res_async()).unwrap() + ztimeout!(zenoh::open(config)).unwrap() } async fn close_router_session(s: Session) { println!("[ ][01d] Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); + ztimeout!(s.close()).unwrap(); } async fn open_client_sessions() -> (Session, Session, Session) { // Open the sessions let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][01a] Opening s01 session"); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][02a] Opening s02 session"); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][03a] Opening s03 session"); - let s03 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s03 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02, s03) } async fn close_sessions(s01: Session, s02: Session, s03: Session) { println!("[ ][01d] Closing s01 session"); - ztimeout!(s01.close().res_async()).unwrap(); + ztimeout!(s01.close()).unwrap(); println!("[ ][02d] Closing s02 session"); - ztimeout!(s02.close().res_async()).unwrap(); + ztimeout!(s02.close()).unwrap(); println!("[ ][03d] Closing s03 session"); - ztimeout!(s03.close().res_async()).unwrap(); + ztimeout!(s03.close()).unwrap(); } async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { @@ -104,25 +104,19 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { // Subscribe to data println!("[PS][01b] Subscribing on s01 session"); let c_msgs1 = msgs1.clone(); - let sub1 = ztimeout!(s01 - .declare_subscriber(key_expr) 
- .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs1.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub1 = ztimeout!(s01.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs1.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Subscribe to data println!("[PS][02b] Subscribing on s02 session"); let c_msgs2 = msgs2.clone(); - let sub2 = ztimeout!(s02 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs2.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub2 = ztimeout!(s02.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs2.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Wait for the declaration to propagate @@ -133,8 +127,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { ztimeout!(s03 .put(key_expr, vec![0u8; size]) - .congestion_control(CongestionControl::Block) - .res_async()) + .congestion_control(CongestionControl::Block)) .unwrap(); } @@ -162,10 +155,10 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { assert_eq!(cnt2, msg_count); println!("[PS][02b] Unsubscribing on s02 session"); - ztimeout!(sub2.undeclare().res_async()).unwrap(); + ztimeout!(sub2.undeclare()).unwrap(); println!("[PS][01b] Unsubscribing on s01 session"); - ztimeout!(sub1.undeclare().res_async()).unwrap(); + ztimeout!(sub1.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -186,42 +179,34 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { println!("[QR][01c] Queryable on s01 session"); let cke = key_expr.clone(); let c_msgs1 = msgs1.clone(); - let qbl1 = ztimeout!(s01 - .declare_queryable(cke.clone()) - .callback(move |sample| { - c_msgs1.fetch_add(1, Ordering::Relaxed); - tokio::task::block_in_place({ - let 
cke2 = cke.clone(); - move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(cke2.clone(), vec![0u8; size]).res_async()) - .unwrap() - }); - } - }); - }) - .res_async()) + let qbl1 = ztimeout!(s01.declare_queryable(cke.clone()).callback(move |sample| { + c_msgs1.fetch_add(1, Ordering::Relaxed); + tokio::task::block_in_place({ + let cke2 = cke.clone(); + move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(cke2.clone(), vec![0u8; size])).unwrap() + }); + } + }); + })) .unwrap(); // Queryable to data println!("[QR][02c] Queryable on s02 session"); let cke = key_expr.clone(); let c_msgs2 = msgs2.clone(); - let qbl2 = ztimeout!(s02 - .declare_queryable(cke.clone()) - .callback(move |sample| { - c_msgs2.fetch_add(1, Ordering::Relaxed); - tokio::task::block_in_place({ - let cke2 = cke.clone(); - move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(cke2.clone(), vec![0u8; size]).res_async()) - .unwrap() - }); - } - }); - }) - .res_async()) + let qbl2 = ztimeout!(s02.declare_queryable(cke.clone()).callback(move |sample| { + c_msgs2.fetch_add(1, Ordering::Relaxed); + tokio::task::block_in_place({ + let cke2 = cke.clone(); + move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(cke2.clone(), vec![0u8; size])).unwrap() + }); + } + }); + })) .unwrap(); // Wait for the declaration to propagate @@ -232,7 +217,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { let cke = key_expr.clone(); let mut cnt = 0; for _ in 0..msg_count { - let rs = ztimeout!(s03.get(cke.clone()).res_async()).unwrap(); + let rs = ztimeout!(s03.get(cke.clone())).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { assert_eq!(s.result().unwrap().payload().len(), size); cnt += 1; @@ -248,10 +233,10 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { assert_eq!(cnt, msg_count); println!("[PS][01c] Unqueryable on s01 session"); - 
ztimeout!(qbl1.undeclare().res_async()).unwrap(); + ztimeout!(qbl1.undeclare()).unwrap(); println!("[PS][02c] Unqueryable on s02 session"); - ztimeout!(qbl2.undeclare().res_async()).unwrap(); + ztimeout!(qbl2.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index d8fed7eeb4..cabee33333 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -19,7 +19,6 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; -use zenoh::core::AsyncResolve; use zenoh::core::Result; use zenoh::scouting::WhatAmI; @@ -107,7 +106,7 @@ fn main() { let config = config_from_args(&args); tracing::info!("Initial conf: {}", &config); - let _session = match zenoh::open(config).res_async().await { + let _session = match zenoh::open(config).await { Ok(runtime) => runtime, Err(e) => { println!("{e}. 
Exiting..."); From b1b1e91011072402744836ce0d66160d710bbc8b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 29 Apr 2024 16:41:03 +0200 Subject: [PATCH 308/598] missing API items reexported (#989) --- zenoh/src/lib.rs | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 6f679407c8..c4247b73da 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -181,6 +181,7 @@ pub mod key_expr { pub use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; } pub use crate::api::key_expr::KeyExpr; + pub use crate::api::key_expr::KeyExprUndeclaration; pub use zenoh_keyexpr::keyexpr; pub use zenoh_keyexpr::OwnedKeyExpr; pub use zenoh_keyexpr::SetIntersectionLevel; @@ -202,24 +203,42 @@ pub mod session { #[doc(hidden)] pub use crate::api::session::init; pub use crate::api::session::open; + #[zenoh_macros::unstable] + #[doc(hidden)] + pub use crate::api::session::InitBuilder; + pub use crate::api::session::OpenBuilder; pub use crate::api::session::Session; pub use crate::api::session::SessionDeclarations; pub use crate::api::session::SessionRef; + pub use crate::api::session::Undeclarable; +} + +/// Tools to access information about the current zenoh [`Session`](crate::Session). 
+pub mod info { + pub use crate::api::info::PeersZidBuilder; + pub use crate::api::info::RoutersZidBuilder; + pub use crate::api::info::SessionInfo; + pub use crate::api::info::ZidBuilder; } /// Sample primitives pub mod sample { pub use crate::api::builders::sample::QoSBuilderTrait; pub use crate::api::builders::sample::SampleBuilder; + pub use crate::api::builders::sample::SampleBuilderAny; + pub use crate::api::builders::sample::SampleBuilderDelete; + pub use crate::api::builders::sample::SampleBuilderPut; pub use crate::api::builders::sample::SampleBuilderTrait; pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; + pub use crate::api::sample::SampleFields; pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; + pub use crate::api::sample::SourceSn; } /// Value primitives @@ -235,10 +254,14 @@ pub mod encoding { /// Payload primitives pub mod bytes { pub use crate::api::bytes::Deserialize; + pub use crate::api::bytes::OptionZBytes; pub use crate::api::bytes::Serialize; pub use crate::api::bytes::StringOrBase64; pub use crate::api::bytes::ZBytes; + pub use crate::api::bytes::ZBytesIterator; pub use crate::api::bytes::ZBytesReader; + pub use crate::api::bytes::ZBytesWriter; + pub use crate::api::bytes::ZDeserializeError; pub use crate::api::bytes::ZSerde; } @@ -262,18 +285,31 @@ pub mod subscriber { /// Publishing primitives pub mod publication { + pub use crate::api::builders::publication::PublicationBuilderDelete; + pub use crate::api::builders::publication::PublicationBuilderPut; pub use crate::api::builders::publication::PublisherBuilder; + pub use crate::api::builders::publication::PublisherDeleteBuilder; #[zenoh_macros::unstable] pub use crate::api::publication::MatchingListener; + #[zenoh_macros::unstable] + pub use 
crate::api::publication::MatchingListenerBuilder; + #[zenoh_macros::unstable] + pub use crate::api::publication::MatchingListenerUndeclaration; + #[zenoh_macros::unstable] + pub use crate::api::publication::MatchingStatus; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; + #[zenoh_macros::unstable] + pub use crate::api::publication::PublisherRef; + pub use crate::api::publication::PublisherUndeclaration; pub use zenoh_protocol::core::CongestionControl; } /// Query primitives pub mod query { + pub use crate::api::query::GetBuilder; pub use crate::api::query::Reply; #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; @@ -287,19 +323,31 @@ pub mod queryable { pub use crate::api::queryable::Query; pub use crate::api::queryable::Queryable; pub use crate::api::queryable::QueryableBuilder; + pub use crate::api::queryable::QueryableUndeclaration; + pub use crate::api::queryable::ReplyBuilder; + pub use crate::api::queryable::ReplyBuilderDelete; + pub use crate::api::queryable::ReplyBuilderPut; + pub use crate::api::queryable::ReplyErrBuilder; + #[zenoh_macros::unstable] + pub use crate::api::queryable::ReplySample; } /// Callback handler trait pub mod handlers { pub use crate::api::handlers::locked; + pub use crate::api::handlers::Callback; + pub use crate::api::handlers::CallbackDrop; pub use crate::api::handlers::DefaultHandler; + pub use crate::api::handlers::FifoChannel; pub use crate::api::handlers::IntoHandler; pub use crate::api::handlers::RingChannel; + pub use crate::api::handlers::RingChannelHandler; } /// Scouting primitives pub mod scouting { pub use crate::api::scouting::scout; + pub use crate::api::scouting::Scout; pub use crate::api::scouting::ScoutBuilder; /// Constants and helpers for zenoh `whatami` flags. 
pub use zenoh_protocol::core::WhatAmI; @@ -311,8 +359,11 @@ pub mod scouting { #[cfg(feature = "unstable")] pub mod liveliness { pub use crate::api::liveliness::Liveliness; + pub use crate::api::liveliness::LivelinessGetBuilder; pub use crate::api::liveliness::LivelinessSubscriberBuilder; pub use crate::api::liveliness::LivelinessToken; + pub use crate::api::liveliness::LivelinessTokenBuilder; + pub use crate::api::liveliness::LivelinessTokenUndeclaration; } /// Timestamp support @@ -345,6 +396,7 @@ pub mod plugins { pub use crate::api::plugins::PluginsManager; pub use crate::api::plugins::Response; pub use crate::api::plugins::RunningPlugin; + pub use crate::api::plugins::PLUGIN_PREFIX; pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; } From c3f993da4baf385adb04d17f911790ca0becec41 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 29 Apr 2024 18:44:09 +0200 Subject: [PATCH 309/598] Add attachment_mut to Sample --- zenoh/src/api/sample.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 365c2a7728..11bfc92c0b 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -355,6 +355,13 @@ impl Sample { pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } + + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
+ #[zenoh_macros::unstable] + #[inline] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() + } } impl From for Value { From d4218fccddf6e5b52539082dd823a19817b3ef44 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 13:04:05 +0300 Subject: [PATCH 310/598] [skip ci] fix SHM exports in new api export mechanism --- zenoh/src/lib.rs | 39 ++++++++++++++++++++++++++------------- zenoh/src/prelude.rs | 4 ++-- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2a238ea875..b8be3d905c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -362,19 +362,32 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; - pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; - pub use zenoh_shm::api::provider::shared_memory_provider::{Deallocate, Defragment}; - pub use zenoh_shm::api::provider::types::AllocAlignment; - pub use zenoh_shm::api::provider::types::MemoryLayout; - pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; - pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; - pub use zenoh_shm::api::{ - protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, + pub use zenoh_shm::api::client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }; + pub use zenoh_shm::api::client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}; + pub use zenoh_shm::api::common::types::{ChunkID, ProtocolID, SegmentID}; + pub use zenoh_shm::api::protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::{ + LayoutedPosixSharedMemoryProviderBackendBuilder, 
PosixSharedMemoryProviderBackend, + PosixSharedMemoryProviderBackendBuilder, }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + protocol_id::POSIX_PROTOCOL_ID, + }; + pub use zenoh_shm::api::provider::shared_memory_provider::{ + AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, + AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, + DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, + ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, SharedMemoryProvider, + SharedMemoryProviderBuilder, SharedMemoryProviderBuilderBackendID, + SharedMemoryProviderBuilderID, StaticProtocolID, + }; + pub use zenoh_shm::api::provider::types::{ + AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, + }; + pub use zenoh_shm::api::slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, }; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 17286ddeea..ac60f16c89 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -51,7 +51,7 @@ pub(crate) mod flat { pub use crate::scouting::*; pub use crate::selector::*; pub use crate::session::*; - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm::*; pub use crate::subscriber::*; pub use crate::time::*; @@ -74,7 +74,7 @@ pub(crate) mod mods { pub use crate::scouting; pub use crate::selector; pub use crate::session; - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm; pub use crate::subscriber; pub use crate::time; From 3c6327727997cbea4a02a5046f00e9a1e06e0c3f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 30 Apr 2024 16:02:09 +0200 Subject: [PATCH 311/598] Merge main in protocol changes (#997) * fix(zenoh_runtime): disable atexit on windows (#981) * Revert "fix(zenoh-runtime): zenoh-c DLL crash in `libc::atexit` handler 
(#972)" This reverts commit 274166d778945be0bb9250944f1374e3c0dfc892. * ci: disable atexit cleanup on Windows * fix: Deny publishing of zenoh-ext-examples (#984) * Fix runtime start calling (#985) * Fix invalid JSON in admin space for static plugins (#988) * Fix admin space: plugins __path__ was invalid JSON (#990) * fix(test): sporadic failures of downsampling test on Windows (#995) * fix: set the minimal sleep interval to 17ms on windows * fixup! fix: set the minimal sleep interval to 17ms on windows * fixup! fix: set the minimal sleep interval to 17ms on windows * Acl fix (#993) * ACL does not intercept messages with no key_expr * Update DEFAULT_CONFIG.json5 --------- Co-authored-by: Yuyuan Yuan Co-authored-by: Mahmoud Mazouz Co-authored-by: Julien Enoch Co-authored-by: OlivierHecart --- DEFAULT_CONFIG.json5 | 2 ++ commons/zenoh-runtime/Cargo.toml | 12 ++----- commons/zenoh-runtime/src/lib.rs | 33 +++-------------- .../tests/operations.rs | 5 ++- .../tests/wildcard.rs | 5 ++- .../src/manager/dynamic_plugin.rs | 2 +- .../src/manager/static_plugin.rs | 2 +- zenoh-ext/examples/Cargo.toml | 1 + zenoh/src/api/session.rs | 19 ++++++---- .../net/routing/interceptor/access_control.rs | 20 +++++------ zenoh/src/net/runtime/adminspace.rs | 9 ++++- zenoh/src/net/runtime/mod.rs | 36 +++---------------- zenoh/src/net/runtime/orchestrator.rs | 2 +- zenoh/tests/interceptors.rs | 9 +++-- zenoh/tests/session.rs | 8 +++-- 15 files changed, 68 insertions(+), 97 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index bd3bbbaf6b..ec9a827777 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -175,6 +175,7 @@ // ], // }, // ], + // /// configure access control (ACL) rules // access_control: { // ///[true/false] acl will be activated only if this is set to true @@ -199,6 +200,7 @@ // }, // ] //}, + /// Configure internal transport parameters transport: { unicast: { diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index 
e3a08a9de8..3625e5036f 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -18,17 +18,9 @@ ron = { workspace = true } serde = { workspace = true } futures = { workspace = true } lazy_static = { workspace = true } +tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } zenoh-result = { workspace = true, features = ["std"] } zenoh-protocol = { workspace = true } zenoh-collections = { workspace = true, features = ["std"] } zenoh-macros = { workspace = true } -tokio = { workspace = true, features = [ - "fs", - "io-util", - "macros", - "net", - "rt-multi-thread", - "sync", - "time", -] } -tracing = { workspace = true } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index cb58cac570..dcd46744e6 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -157,6 +157,8 @@ pub struct ZRuntimePool(HashMap>); impl ZRuntimePool { fn new() -> Self { + // It has been recognized that using atexit within Windows DLL is problematic + #[cfg(not(target_os = "windows"))] // Register a callback to clean the static variables. unsafe { libc::atexit(cleanup); @@ -184,42 +186,17 @@ impl ZRuntimePool { // If there are any blocking tasks spawned by ZRuntimes, the function will block until they return. impl Drop for ZRuntimePool { fn drop(&mut self) { - std::panic::set_hook(Box::new(|_| { - // To suppress the panic error caught in the following `catch_unwind`. - })); - let handles: Vec<_> = self .0 .drain() .filter_map(|(_name, mut rt)| { - rt.take().map(|r| { - // NOTE: The error of the atexit handler in DLL (static lib is fine) - // failing to spawn a new thread in `cleanup` has been identified. 
- std::panic::catch_unwind(|| { - std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1))) - }) - }) + rt.take() + .map(|r| std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1)))) }) .collect(); for hd in handles { - match hd { - Ok(handle) => { - if let Err(err) = handle.join() { - tracing::error!( - "The handle failed to join during `ZRuntimePool` drop due to {err:?}" - ); - } - } - Err(err) => { - // WARN: Windows with DLL is expected to panic for the time being. - // Otherwise, report the error. - #[cfg(not(target_os = "windows"))] - tracing::error!("`ZRuntimePool` failed to drop due to {err:?}"); - #[cfg(target_os = "windows")] - tracing::trace!("`ZRuntimePool` failed to drop due to {err:?}"); - } - } + let _ = hd.join(); } } } diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index b5384e13be..61ea53deba 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -70,7 +70,10 @@ async fn test_updates_in_order() { ) .unwrap(); - let runtime = zenoh::runtime::Runtime::new(config).await.unwrap(); + let runtime = zenoh::runtime::RuntimeBuilder::new(config) + .build() + .await + .unwrap(); let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index bd38e834d7..f2482da8e5 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -71,7 +71,10 @@ async fn test_wild_card_in_order() { ) .unwrap(); - let runtime = zenoh::runtime::Runtime::new(config).await.unwrap(); + let runtime = zenoh::runtime::RuntimeBuilder::new(config) + .build() + .await + .unwrap(); let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", 
&runtime).unwrap(); diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs index 90008aad36..a8a78306ea 100644 --- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs @@ -142,7 +142,7 @@ impl PluginStatus if let Some(starter) = &self.starter { starter.path() } else { - "" + "__not_loaded__" } } fn state(&self) -> PluginState { diff --git a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs index 6d1bcae278..c275fb9818 100644 --- a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs @@ -51,7 +51,7 @@ where Some(P::PLUGIN_LONG_VERSION) } fn path(&self) -> &str { - "" + "__static_lib__" } fn state(&self) -> PluginState { self.instance diff --git a/zenoh-ext/examples/Cargo.toml b/zenoh-ext/examples/Cargo.toml index 3493016835..9cca8848ff 100644 --- a/zenoh-ext/examples/Cargo.toml +++ b/zenoh-ext/examples/Cargo.toml @@ -22,6 +22,7 @@ edition = { workspace = true } license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh" +publish = false [badges] maintenance = { status = "actively-developed" } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index dea322419c..703fca2e9d 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -31,7 +31,11 @@ use super::{ value::Value, Id, }; -use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; +use crate::net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, +}; use std::future::IntoFuture; use std::{ collections::HashMap, @@ -842,12 +846,13 @@ impl Session { tracing::debug!("Config: {:?}", &config); let aggregated_subscribers = config.aggregation().subscribers().clone(); let aggregated_publishers = 
config.aggregation().publishers().clone(); - let mut runtime = Runtime::init( - config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - shm_clients, - ) - .await?; + #[allow(unused_mut)] // Required for shared-memory + let mut runtime = RuntimeBuilder::new(config); + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + { + runtime = runtime.shm_clients(shm_clients); + } + let mut runtime = runtime.build().await?; let mut session = Self::init( runtime.clone(), diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index b23db9765e..102e30a0df 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -162,14 +162,14 @@ impl InterceptorTrait for IngressAclEnforcer { None } }) - .or_else(|| ctx.full_expr())?; + .or_else(|| ctx.full_expr()); match &ctx.msg.body { NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (ingress)", key_expr) == Permission::Deny { + if self.action(Action::Put, "Put (ingress)", key_expr?) == Permission::Deny { return None; } } @@ -177,7 +177,7 @@ impl InterceptorTrait for IngressAclEnforcer { payload: RequestBody::Query(_), .. }) => { - if self.action(Action::Get, "Get (ingress)", key_expr) == Permission::Deny { + if self.action(Action::Get, "Get (ingress)", key_expr?) 
== Permission::Deny { return None; } } @@ -188,7 +188,7 @@ impl InterceptorTrait for IngressAclEnforcer { if self.action( Action::DeclareSubscriber, "Declare Subscriber (ingress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; @@ -201,7 +201,7 @@ impl InterceptorTrait for IngressAclEnforcer { if self.action( Action::DeclareQueryable, "Declare Queryable (ingress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; @@ -230,14 +230,14 @@ impl InterceptorTrait for EgressAclEnforcer { None } }) - .or_else(|| ctx.full_expr())?; + .or_else(|| ctx.full_expr()); match &ctx.msg.body { NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (egress)", key_expr) == Permission::Deny { + if self.action(Action::Put, "Put (egress)", key_expr?) == Permission::Deny { return None; } } @@ -245,7 +245,7 @@ impl InterceptorTrait for EgressAclEnforcer { payload: RequestBody::Query(_), .. }) => { - if self.action(Action::Get, "Get (egress)", key_expr) == Permission::Deny { + if self.action(Action::Get, "Get (egress)", key_expr?) 
== Permission::Deny { return None; } } @@ -256,7 +256,7 @@ impl InterceptorTrait for EgressAclEnforcer { if self.action( Action::DeclareSubscriber, "Declare Subscriber (egress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; @@ -269,7 +269,7 @@ impl InterceptorTrait for EgressAclEnforcer { if self.action( Action::DeclareQueryable, "Declare Queryable (egress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 9ea54b8d88..3f2e0b488f 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -793,7 +793,14 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(key_expr, plugin.path()).wait() { + if let Err(e) = query + .reply( + key_expr, + serde_json::to_string(plugin.path()) + .unwrap_or_else(|_| String::from("{}")), + ) + .wait() + { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 4991844650..f1cf4d95d2 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -184,6 +184,11 @@ impl RuntimeBuilder { *handler.runtime.write().unwrap() = Runtime::downgrade(&runtime); get_mut_unchecked(&mut runtime.state.router.clone()).init_link_state(runtime.clone()); + // Admin space + if start_admin_space { + AdminSpace::start(&runtime, LONG_VERSION.clone()).await; + } + // Start plugins #[cfg(all(feature = "unstable", feature = "plugins"))] start_plugins(&runtime); @@ -215,11 +220,6 @@ impl RuntimeBuilder { } }); - // Admin space - if start_admin_space { - AdminSpace::start(&runtime, LONG_VERSION.clone()).await; - } - Ok(runtime) } } @@ -241,32 +241,6 @@ impl StructVersion for Runtime { impl 
PluginStartArgs for Runtime {} impl Runtime { - pub async fn new(config: Config) -> ZResult { - // Create plugin_manager and load plugins - let mut runtime = Runtime::init( - config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - None, - ) - .await?; - match runtime.start().await { - Ok(()) => Ok(runtime), - Err(err) => Err(err), - } - } - - pub(crate) async fn init( - config: Config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< - Arc, - >, - ) -> ZResult { - let builder = RuntimeBuilder::new(config); - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - let builder = builder.shm_clients(shm_clients); - builder.build().await - } - #[inline(always)] pub(crate) fn manager(&self) -> &TransportManager { &self.state.manager diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 687fa90649..c2c7ecedd2 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -43,7 +43,7 @@ pub enum Loop { } impl Runtime { - pub(crate) async fn start(&mut self) -> ZResult<()> { + pub async fn start(&mut self) -> ZResult<()> { match self.whatami() { WhatAmI::Client => self.start_client().await, WhatAmI::Peer => self.start_peer().await, diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index f6e876d92e..37f193630d 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -15,6 +15,11 @@ use std::sync::{Arc, Mutex}; use zenoh::internal::zlock; use zenoh::prelude::*; +#[cfg(target_os = "windows")] +static MINIMAL_SLEEP_INTERVAL_MS: u64 = 17; +#[cfg(not(target_os = "windows"))] +static MINIMAL_SLEEP_INTERVAL_MS: u64 = 2; + struct IntervalCounter { first_tick: bool, last_time: std::time::Instant, @@ -143,7 +148,7 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio - let interval = std::time::Duration::from_millis(2); + let interval = 
std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); let messages_count = 1000; for i in 0..messages_count { publisher_r100.put(format!("message {}", i)).wait().unwrap(); @@ -245,7 +250,7 @@ fn downsampling_by_interface_impl(egress: bool) { .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio - let interval = std::time::Duration::from_millis(2); + let interval = std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); let messages_count = 1000; for i in 0..messages_count { publisher_r100.put(format!("message {}", i)).wait().unwrap(); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index b52dbb90b8..43dfc79470 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,7 +17,7 @@ use std::time::Duration; use zenoh::internal::ztimeout; use zenoh::prelude::*; #[cfg(feature = "unstable")] -use zenoh::runtime::Runtime; +use zenoh::runtime::{Runtime, RuntimeBuilder}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -264,7 +264,8 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Creating r1 session runtime: {:?}", endpoints); - let r1 = Runtime::new(config).await.unwrap(); + let mut r1 = RuntimeBuilder::new(config).build().await.unwrap(); + r1.start().await.unwrap(); let mut config = config::peer(); config.connect.endpoints = endpoints @@ -273,7 +274,8 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Creating r2 session runtime: {:?}", endpoints); - let r2 = Runtime::new(config).await.unwrap(); + let mut r2 = RuntimeBuilder::new(config).build().await.unwrap(); + r2.start().await.unwrap(); (r1, r2) } From 8fe5ab5852fdaa18a88f450d4531d9fbbfbc7531 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 30 Apr 2024 16:02:50 +0200 
Subject: [PATCH 312/598] Rename ZidBuilder to ZenohIdBuilder (#994) --- zenoh/src/api/info.rs | 36 ++++++++++++++++++------------------ zenoh/src/lib.rs | 6 +++--- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index a6f8ff1629..205a412142 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -33,21 +33,21 @@ use zenoh_protocol::core::{WhatAmI, ZenohId}; /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ZidBuilder<'a> { +pub struct ZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for ZidBuilder<'a> { +impl<'a> Resolvable for ZenohIdBuilder<'a> { type To = ZenohId; } -impl<'a> Wait for ZidBuilder<'a> { +impl<'a> Wait for ZenohIdBuilder<'a> { fn wait(self) -> Self::To { self.session.runtime.zid() } } -impl<'a> IntoFuture for ZidBuilder<'a> { +impl<'a> IntoFuture for ZenohIdBuilder<'a> { type Output = ::To; type IntoFuture = Ready<::To>; @@ -73,15 +73,15 @@ impl<'a> IntoFuture for ZidBuilder<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct RoutersZidBuilder<'a> { +pub struct RoutersZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for RoutersZidBuilder<'a> { +impl<'a> Resolvable for RoutersZenohIdBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> Wait for RoutersZidBuilder<'a> { +impl<'a> Wait for RoutersZenohIdBuilder<'a> { fn wait(self) -> Self::To { Box::new( zenoh_runtime::ZRuntime::Application @@ -97,7 +97,7 @@ impl<'a> Wait for RoutersZidBuilder<'a> { } } -impl<'a> IntoFuture for RoutersZidBuilder<'a> { +impl<'a> IntoFuture for RoutersZenohIdBuilder<'a> { type Output = ::To; type IntoFuture = Ready<::To>; @@ -123,15 +123,15 @@ impl<'a> IntoFuture for RoutersZidBuilder<'a> { /// ``` 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct PeersZidBuilder<'a> { +pub struct PeersZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for PeersZidBuilder<'a> { +impl<'a> Resolvable for PeersZenohIdBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> Wait for PeersZidBuilder<'a> { +impl<'a> Wait for PeersZenohIdBuilder<'a> { fn wait(self) -> ::To { Box::new( zenoh_runtime::ZRuntime::Application @@ -147,7 +147,7 @@ impl<'a> Wait for PeersZidBuilder<'a> { } } -impl<'a> IntoFuture for PeersZidBuilder<'a> { +impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { type Output = ::To; type IntoFuture = Ready<::To>; @@ -187,8 +187,8 @@ impl SessionInfo<'_> { /// let zid = session.info().zid().await; /// # } /// ``` - pub fn zid(&self) -> ZidBuilder<'_> { - ZidBuilder { + pub fn zid(&self) -> ZenohIdBuilder<'_> { + ZenohIdBuilder { session: self.session.clone(), } } @@ -207,8 +207,8 @@ impl SessionInfo<'_> { /// while let Some(router_zid) = routers_zid.next() {} /// # } /// ``` - pub fn routers_zid(&self) -> RoutersZidBuilder<'_> { - RoutersZidBuilder { + pub fn routers_zid(&self) -> RoutersZenohIdBuilder<'_> { + RoutersZenohIdBuilder { session: self.session.clone(), } } @@ -226,8 +226,8 @@ impl SessionInfo<'_> { /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` - pub fn peers_zid(&self) -> PeersZidBuilder<'_> { - PeersZidBuilder { + pub fn peers_zid(&self) -> PeersZenohIdBuilder<'_> { + PeersZenohIdBuilder { session: self.session.clone(), } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c4247b73da..71ab3a72e8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -215,10 +215,10 @@ pub mod session { /// Tools to access information about the current zenoh [`Session`](crate::Session). 
pub mod info { - pub use crate::api::info::PeersZidBuilder; - pub use crate::api::info::RoutersZidBuilder; + pub use crate::api::info::PeersZenohIdBuilder; + pub use crate::api::info::RoutersZenohIdBuilder; pub use crate::api::info::SessionInfo; - pub use crate::api::info::ZidBuilder; + pub use crate::api::info::ZenohIdBuilder; } /// Sample primitives From bca2fd74ca5bdc132e194ef62d56429580d852bc Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 18:10:38 +0300 Subject: [PATCH 313/598] Massive renaming for ZSliceShm and ZSliceShmMut --- .../src/api/{slice => buffer}/mod.rs | 4 +- .../src/api/{slice => buffer}/traits.rs | 0 .../{slice/zsliceshm.rs => buffer/zshm.rs} | 76 ++++++++-------- .../zsliceshmmut.rs => buffer/zshmmut.rs} | 86 +++++++++---------- commons/zenoh-shm/src/api/mod.rs | 2 +- .../api/provider/shared_memory_provider.rs | 12 +-- commons/zenoh-shm/src/api/provider/types.rs | 4 +- examples/examples/z_get_shm.rs | 2 +- examples/examples/z_payload_shm.rs | 36 ++++---- examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_queryable_shm.rs | 2 +- examples/examples/z_sub_shm.rs | 6 +- zenoh/src/api/bytes.rs | 48 +++++------ zenoh/src/api/encoding.rs | 6 +- zenoh/src/lib.rs | 8 +- zenoh/tests/bytes.rs | 32 +++---- 16 files changed, 163 insertions(+), 163 deletions(-) rename commons/zenoh-shm/src/api/{slice => buffer}/mod.rs (92%) rename commons/zenoh-shm/src/api/{slice => buffer}/traits.rs (100%) rename commons/zenoh-shm/src/api/{slice/zsliceshm.rs => buffer/zshm.rs} (59%) rename commons/zenoh-shm/src/api/{slice/zsliceshmmut.rs => buffer/zshmmut.rs} (59%) diff --git a/commons/zenoh-shm/src/api/slice/mod.rs b/commons/zenoh-shm/src/api/buffer/mod.rs similarity index 92% rename from commons/zenoh-shm/src/api/slice/mod.rs rename to commons/zenoh-shm/src/api/buffer/mod.rs index 59c793f94a..8a3e040da9 100644 --- a/commons/zenoh-shm/src/api/slice/mod.rs +++ b/commons/zenoh-shm/src/api/buffer/mod.rs @@ -13,5 +13,5 @@ // pub mod traits; -pub mod 
zsliceshm; -pub mod zsliceshmmut; +pub mod zshm; +pub mod zshmmut; diff --git a/commons/zenoh-shm/src/api/slice/traits.rs b/commons/zenoh-shm/src/api/buffer/traits.rs similarity index 100% rename from commons/zenoh-shm/src/api/slice/traits.rs rename to commons/zenoh-shm/src/api/buffer/traits.rs diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshm.rs rename to commons/zenoh-shm/src/api/buffer/zshm.rs index 86f4395ebb..e7cf2a3197 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshm.rs +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -22,43 +22,43 @@ use zenoh_buffers::{ZBuf, ZSlice}; use crate::SharedMemoryBuf; -use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; +use super::{traits::SHMBuf, zshmmut::zshmmut}; -/// An immutable SHM slice +/// An immutable SHM buffer #[zenoh_macros::unstable_doc] #[repr(transparent)] #[derive(Clone, Debug, PartialEq, Eq)] -pub struct ZSliceShm(pub(crate) SharedMemoryBuf); +pub struct ZShm(pub(crate) SharedMemoryBuf); -impl SHMBuf for ZSliceShm { +impl SHMBuf for ZShm { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl PartialEq<&zsliceshm> for ZSliceShm { - fn eq(&self, other: &&zsliceshm) -> bool { +impl PartialEq<&zshm> for ZShm { + fn eq(&self, other: &&zshm) -> bool { self.0 == other.0 .0 } } -impl Borrow for ZSliceShm { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShm { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShm { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShm { + fn borrow_mut(&mut 
self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShm { +impl Deref for ZShm { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -66,37 +66,37 @@ impl Deref for ZSliceShm { } } -impl AsRef<[u8]> for ZSliceShm { +impl AsRef<[u8]> for ZShm { fn as_ref(&self) -> &[u8] { self } } -impl From for ZSliceShm { +impl From for ZShm { fn from(value: SharedMemoryBuf) -> Self { Self(value) } } -impl From for ZSlice { - fn from(value: ZSliceShm) -> Self { +impl From for ZSlice { + fn from(value: ZShm) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: ZSliceShm) -> Self { +impl From for ZBuf { + fn from(value: ZShm) -> Self { value.0.into() } } -impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { +impl TryFrom<&mut ZShm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut ZSliceShm) -> Result { + fn try_from(value: &mut ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } @@ -105,64 +105,64 @@ impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { } } -/// A borrowed immutable SHM slice +/// A borrowed immutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshm(ZSliceShm); +pub struct zshm(ZShm); -impl ToOwned for zsliceshm { - type Owned = ZSliceShm; +impl ToOwned for zshm { + type Owned = ZShm; fn to_owned(&self) -> Self::Owned { self.0.clone() } } -impl PartialEq for &zsliceshm { - fn eq(&self, other: &ZSliceShm) -> bool { +impl PartialEq for &zshm { + fn eq(&self, 
other: &ZShm) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshm { - type Target = ZSliceShm; +impl Deref for zshm { + type Target = ZShm; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshm { +impl DerefMut for zshm { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl From<&SharedMemoryBuf> for &zsliceshm { +impl From<&SharedMemoryBuf> for &zshm { fn from(value: &SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl From<&mut SharedMemoryBuf> for &mut zsliceshm { +impl From<&mut SharedMemoryBuf> for &mut zshm { fn from(value: &mut SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl TryFrom<&mut zsliceshm> for &mut zsliceshmmut { +impl TryFrom<&mut zshm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut zsliceshm) -> Result { + fn try_from(value: &mut zshm) -> Result { match value.0 .0.is_unique() && value.0 .0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshmmut.rs rename to commons/zenoh-shm/src/api/buffer/zshmmut.rs index 62823785da..e40c9c77f1 100644 --- 
a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs +++ b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -21,36 +21,36 @@ use crate::SharedMemoryBuf; use super::{ traits::{SHMBuf, SHMBufMut}, - zsliceshm::{zsliceshm, ZSliceShm}, + zshm::{zshm, ZShm}, }; -/// A mutable SHM slice +/// A mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[repr(transparent)] -pub struct ZSliceShmMut(SharedMemoryBuf); +pub struct ZShmMut(SharedMemoryBuf); -impl SHMBuf for ZSliceShmMut { +impl SHMBuf for ZShmMut { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl SHMBufMut for ZSliceShmMut {} +impl SHMBufMut for ZShmMut {} -impl ZSliceShmMut { +impl ZShmMut { pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { Self(data) } } -impl PartialEq for &ZSliceShmMut { - fn eq(&self, other: &zsliceshmmut) -> bool { +impl PartialEq for &ZShmMut { + fn eq(&self, other: &zshmmut) -> bool { self.0 == other.0 .0 } } -impl TryFrom for ZSliceShmMut { +impl TryFrom for ZShmMut { type Error = SharedMemoryBuf; fn try_from(value: SharedMemoryBuf) -> Result { @@ -61,10 +61,10 @@ impl TryFrom for ZSliceShmMut { } } -impl TryFrom for ZSliceShmMut { - type Error = ZSliceShm; +impl TryFrom for ZShmMut { + type Error = ZShm; - fn try_from(value: ZSliceShm) -> Result { + fn try_from(value: ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => Ok(Self(value.0)), false => Err(value), @@ -72,39 +72,39 @@ impl TryFrom for ZSliceShmMut { } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, 
ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShmMut { +impl Deref for ZShmMut { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -112,75 +112,75 @@ impl Deref for ZSliceShmMut { } } -impl DerefMut for ZSliceShmMut { +impl DerefMut for ZShmMut { fn deref_mut(&mut self) -> &mut Self::Target { self.0.as_mut() } } -impl AsRef<[u8]> for ZSliceShmMut { +impl AsRef<[u8]> for ZShmMut { fn as_ref(&self) -> &[u8] { self } } -impl AsMut<[u8]> for ZSliceShmMut { +impl AsMut<[u8]> for ZShmMut { fn as_mut(&mut self) -> &mut [u8] { self } } -impl From for ZSliceShm { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZShm { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZSlice { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZSlice { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for 
ZBuf { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZBuf { + fn from(value: ZShmMut) -> Self { value.0.into() } } -/// A borrowed mutable SHM slice +/// A borrowed mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshmmut(ZSliceShmMut); +pub struct zshmmut(ZShmMut); -impl PartialEq for &zsliceshmmut { - fn eq(&self, other: &ZSliceShmMut) -> bool { +impl PartialEq for &zshmmut { + fn eq(&self, other: &ZShmMut) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshmmut { - type Target = ZSliceShmMut; +impl Deref for zshmmut { + type Target = ZShmMut; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshmmut { +impl DerefMut for zshmmut { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl TryFrom<&mut SharedMemoryBuf> for &mut zsliceshmmut { +impl TryFrom<&mut SharedMemoryBuf> for &mut zshmmut { type Error = (); fn try_from(value: &mut SharedMemoryBuf) -> Result { match value.is_unique() && value.is_valid() { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction true => Ok(unsafe { core::mem::transmute(value) }), false => Err(()), diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs index 08a5678fa8..a87188da29 100644 --- a/commons/zenoh-shm/src/api/mod.rs +++ b/commons/zenoh-shm/src/api/mod.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // +pub mod buffer; pub mod client; pub mod client_storage; pub mod common; pub mod protocol_implementations; pub mod provider; -pub mod slice; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index c3b8128300..82a4789738 100644 --- 
a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -23,7 +23,7 @@ use async_trait::async_trait; use zenoh_result::ZResult; use crate::{ - api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, + api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, header::{ allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, storage::GLOBAL_HEADER_STORAGE, @@ -713,11 +713,11 @@ where self.backend.defragment() } - /// Map externally-allocated chunk into ZSliceShmMut. + /// Map externally-allocated chunk into ZShmMut. /// This method is designed to be used with push data sources. /// Remember that chunk's len may be >= len! #[zenoh_macros::unstable_doc] - pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { // allocate resources for SHM buffer let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; @@ -729,7 +729,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } /// Try to collect free chunks. 
@@ -806,7 +806,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } fn alloc_resources() -> ZResult<( @@ -911,6 +911,6 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } } diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index 662482f567..b7f1ad2de6 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -16,7 +16,7 @@ use std::fmt::Display; use zenoh_result::{bail, ZResult}; -use crate::api::slice::zsliceshmmut::ZSliceShmMut; +use crate::api::buffer::zshmmut::ZShmMut; use super::chunk::AllocatedChunk; @@ -170,4 +170,4 @@ pub type ChunkAllocResult = Result; /// SHM buffer allocation result #[zenoh_macros::unstable_doc] -pub type BufAllocResult = Result; +pub type BufAllocResult = Result; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 60015829aa..2773348fd0 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -94,7 +94,7 @@ async fn main() { match reply.result() { Ok(sample) => { print!(">> Received ('{}': ", sample.key_expr().as_str()); - match sample.payload().deserialize::<&zsliceshm>() { + match sample.payload().deserialize::<&zshm>() { Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), } diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_payload_shm.rs index 4bf45381de..d9ab4e1f82 100644 --- a/examples/examples/z_payload_shm.rs +++ b/examples/examples/z_payload_shm.rs @@ -14,8 +14,8 @@ use zenoh::{ bytes::ZBytes, shm::{ - zsliceshm, zsliceshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, - ZSliceShm, ZSliceShmMut, POSIX_PROTOCOL_ID, + zshm, zshmmut, 
PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, + ZShmMut, POSIX_PROTOCOL_ID, }, }; @@ -35,59 +35,59 @@ fn main() { // Prepare a layout for allocations let layout = provider.alloc_layout().size(1024).res().unwrap(); - // allocate an SHM buffer (ZSliceShmMut) + // allocate an SHM buffer (ZShmMut) let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) - let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); // immutable API let _data: &[u8] = &owned_shm_buf; - // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) - let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let mut owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) let mut payload: ZBytes = owned_shm_buf_mut.into(); // branch to illustrate immutable access to SHM data { - // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) - let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); // immutable API let _data: &[u8] = borrowed_shm_buf; - // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) + // construct owned buffer from borrowed type (&zshm -> ZShm) let owned = borrowed_shm_buf.to_owned(); // immutable API let _data: &[u8] = &owned; - // 
try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) - let owned_mut: Result = owned.try_into(); - // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') assert!(owned_mut.is_err()) } // branch to illustrate mutable access to SHM data { - // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) - let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); // immutable API let _data: &[u8] = borrowed_shm_buf; - // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) - let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); // mutable and immutable API let _data: &[u8] = borrowed_shm_buf_mut; diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 7a7bd61580..372967f6e8 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -80,7 +80,7 @@ fn main() { .res() .unwrap(); - // convert ZSliceShmMut into ZSlice as ZSliceShmMut does not support Clone + // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); // -- warmup -- diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 62fa7571d5..49939dcb0a 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -76,7 +76,7 @@ async fn main() { query.key_expr().as_str(), ); if let Some(payload) = query.payload() { - match 
payload.deserialize::<&zsliceshm>() { + match payload.deserialize::<&zshm>() { Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), } diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index a43b5c6cd0..a7e96c2b75 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -41,7 +41,7 @@ async fn main() { sample.kind(), sample.key_expr().as_str(), ); - match sample.payload().deserialize::<&zsliceshm>() { + match sample.payload().deserialize::<&zshm>() { Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), } @@ -52,12 +52,12 @@ async fn main() { // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. // - // use zenoh::shm::zsliceshmmut; + // use zenoh::shm::zshmmut; // while let Ok(mut sample) = subscriber.recv_async().await { // let kind = sample.kind(); // let key_expr = sample.key_expr().to_string(); - // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // match sample.payload_mut().deserialize_mut::<&mut zshmmut>() { // Ok(payload) => println!( // ">> [Subscriber] Received {} ('{}': '{:02x?}')", // kind, key_expr, payload diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index c36136ef81..ce88b2bdbe 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -30,9 +30,9 @@ use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::{ - api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + api::buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, }, SharedMemoryBuf, }; @@ -1524,47 +1524,47 @@ impl TryFrom<&mut ZBytes> for 
serde_pickle::Value { // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShm) -> Self::Output { + fn serialize(self, t: ZShm) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShm) -> Self { +impl From for ZBytes { + fn from(t: ZShm) -> Self { ZSerde.serialize(t) } } // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShmMut) -> Self::Output { + fn serialize(self, t: ZShmMut) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShmMut) -> Self { +impl From for ZBytes { + fn from(t: ZShmMut) -> Self { ZSerde.serialize(t) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a zshm> for ZSerde { type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { - // A ZSliceShm is expected to have only one slice + fn deserialize(self, v: Self::Input) -> Result<&'a zshm, Self::Error> { + // A ZShm is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { if let Some(shmb) = zs.downcast_ref::() { @@ -1576,7 +1576,7 @@ impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { +impl<'a> TryFrom<&'a ZBytes> for &'a zshm { type Error = ZDeserializeError; fn try_from(value: &'a ZBytes) -> Result { @@ -1585,7 +1585,7 @@ impl<'a> 
TryFrom<&'a ZBytes> for &'a zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1594,11 +1594,11 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshm, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1611,11 +1611,11 @@ impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshmmut, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1628,7 +1628,7 @@ impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1838,7 +1838,7 @@ mod tests { protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - 
slice::zsliceshm::{zsliceshm, ZSliceShm}, + slice::zshm::{zshm, ZShm}, }; const NUM: usize = 1_000; @@ -1964,9 +1964,9 @@ mod tests { let mutable_shm_buf = layout.alloc().res().unwrap(); // convert to immutable SHM buffer - let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + let immutable_shm_buf: ZShm = mutable_shm_buf.into(); - serialize_deserialize!(&zsliceshm, immutable_shm_buf); + serialize_deserialize!(&zshm, immutable_shm_buf); } // Properties diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 6c08303612..791bdbd3ea 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -17,7 +17,7 @@ use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] -use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; +use zenoh_shm::api::buffer::{zshm::ZShm, zshmmut::ZShmMut}; /// Default encoding values used by Zenoh. 
/// @@ -835,10 +835,10 @@ impl EncodingMapping for serde_pickle::Value { // - Zenoh SHM #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShm { +impl EncodingMapping for ZShm { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShmMut { +impl EncodingMapping for ZShmMut { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 92901c54b3..6e6f7bae64 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -417,6 +417,10 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { + pub use zenoh_shm::api::buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }; pub use zenoh_shm::api::client::{ shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, }; @@ -441,8 +445,4 @@ pub mod shm { pub use zenoh_shm::api::provider::types::{ AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, }; - pub use zenoh_shm::api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, - }; } diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 41e6d14c6e..f8eb11bf63 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -32,38 +32,38 @@ fn shm_bytes_single_buf() { // Prepare a layout for allocations let layout = provider.alloc_layout().size(1024).res().unwrap(); - // allocate an SHM buffer (ZSliceShmMut) + // allocate an SHM buffer (ZShmMut) let owned_shm_buf_mut = layout.alloc().res().unwrap(); - // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) - let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); - // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) - let owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + // convert again into 
mutable owned buffer (ZShm -> ZSlceShmMut) + let owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); - // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) let mut payload: ZBytes = owned_shm_buf_mut.into(); // branch to illustrate immutable access to SHM data { - // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) - let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); - // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) + // construct owned buffer from borrowed type (&zshm -> ZShm) let owned = borrowed_shm_buf.to_owned(); - // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) - let owned_mut: Result = owned.try_into(); - // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') assert!(owned_mut.is_err()) } // branch to illustrate mutable access to SHM data { - // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) - let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); - // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) - let _borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let _borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); } } From ccb960dc7814206e68692898cadeb49189eac133 Mon Sep 17 00:00:00 2001 From: yellowhatter 
Date: Tue, 30 Apr 2024 18:40:12 +0300 Subject: [PATCH 314/598] fix ci --- zenoh/src/api/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index ce88b2bdbe..874f37ba8c 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1838,7 +1838,7 @@ mod tests { protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zshm::{zshm, ZShm}, + buffer::zshm::{zshm, ZShm}, }; const NUM: usize = 1_000; From bd5a0da5fa1ad2536d06a72740907b79978ee8cc Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 18:41:55 +0300 Subject: [PATCH 315/598] [skip ci] z_payload_shm -> z_bytes_shm --- examples/Cargo.toml | 4 ++-- examples/examples/{z_payload_shm.rs => z_bytes_shm.rs} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename examples/examples/{z_payload_shm.rs => z_bytes_shm.rs} (100%) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 602c3833db..263653028a 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -169,6 +169,6 @@ path = "examples/z_alloc_shm.rs" required-features = ["unstable", "shared-memory"] [[example]] -name = "z_payload_shm" -path = "examples/z_payload_shm.rs" +name = "z_bytes_shm" +path = "examples/z_bytes_shm.rs" required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_bytes_shm.rs similarity index 100% rename from examples/examples/z_payload_shm.rs rename to examples/examples/z_bytes_shm.rs From 6ea1cc5b2a1d1008288417858eccc8b9dd38d424 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 29 Apr 2024 16:57:35 +0200 Subject: [PATCH 316/598] refactor: use unstable rustfmt config to format imports --- .github/workflows/ci.yml | 2 +- .github/workflows/pre-release.yml | 2 +- commons/zenoh-buffers/src/bbuf.rs | 11 +- commons/zenoh-buffers/src/lib.rs | 6 +- commons/zenoh-buffers/src/slice.rs | 13 +- commons/zenoh-buffers/src/vec.rs | 8 +- 
commons/zenoh-buffers/src/zbuf.rs | 15 +- commons/zenoh-buffers/src/zslice.rs | 9 +- commons/zenoh-buffers/tests/readwrite.rs | 2 +- commons/zenoh-codec/src/common/extension.rs | 4 +- commons/zenoh-codec/src/core/encoding.rs | 3 +- commons/zenoh-codec/src/core/locator.rs | 4 +- commons/zenoh-codec/src/core/mod.rs | 4 +- commons/zenoh-codec/src/core/shm.rs | 3 +- commons/zenoh-codec/src/core/timestamp.rs | 4 +- commons/zenoh-codec/src/core/wire_expr.rs | 4 +- commons/zenoh-codec/src/core/zbuf.rs | 6 +- commons/zenoh-codec/src/core/zenohid.rs | 4 +- commons/zenoh-codec/src/core/zint.rs | 3 +- commons/zenoh-codec/src/core/zslice.rs | 3 +- commons/zenoh-codec/src/network/declare.rs | 4 +- commons/zenoh-codec/src/network/interest.rs | 3 +- commons/zenoh-codec/src/network/mod.rs | 7 +- commons/zenoh-codec/src/network/oam.rs | 3 +- commons/zenoh-codec/src/network/push.rs | 3 +- commons/zenoh-codec/src/network/request.rs | 7 +- commons/zenoh-codec/src/network/response.rs | 7 +- commons/zenoh-codec/src/scouting/hello.rs | 4 +- commons/zenoh-codec/src/scouting/mod.rs | 3 +- commons/zenoh-codec/src/scouting/scout.rs | 4 +- commons/zenoh-codec/src/transport/batch.rs | 22 +- commons/zenoh-codec/src/transport/close.rs | 3 +- commons/zenoh-codec/src/transport/fragment.rs | 3 +- commons/zenoh-codec/src/transport/frame.rs | 4 +- commons/zenoh-codec/src/transport/init.rs | 7 +- commons/zenoh-codec/src/transport/join.rs | 4 +- .../zenoh-codec/src/transport/keepalive.rs | 3 +- commons/zenoh-codec/src/transport/mod.rs | 3 +- commons/zenoh-codec/src/transport/oam.rs | 3 +- commons/zenoh-codec/src/transport/open.rs | 4 +- commons/zenoh-codec/src/zenoh/del.rs | 4 +- commons/zenoh-codec/src/zenoh/err.rs | 4 +- commons/zenoh-codec/src/zenoh/mod.rs | 11 +- commons/zenoh-codec/src/zenoh/put.rs | 12 +- commons/zenoh-codec/src/zenoh/query.rs | 5 +- commons/zenoh-codec/src/zenoh/reply.rs | 4 +- commons/zenoh-codec/tests/codec.rs | 10 +- .../zenoh-collections/src/single_or_vec.rs | 5 +- 
commons/zenoh-config/src/connection_retry.rs | 10 +- commons/zenoh-config/src/lib.rs | 9 +- commons/zenoh-config/src/mode_dependent.rs | 4 +- commons/zenoh-core/src/lib.rs | 3 +- commons/zenoh-crypto/src/cipher.rs | 12 +- commons/zenoh-keyexpr/benches/keyexpr_tree.rs | 13 +- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 4 +- commons/zenoh-keyexpr/src/key_expr/canon.rs | 5 +- .../src/key_expr/format/parsing.rs | 3 +- .../src/key_expr/intersect/classical.rs | 3 +- .../src/key_expr/intersect/mod.rs | 3 +- commons/zenoh-keyexpr/src/key_expr/owned.rs | 3 +- commons/zenoh-keyexpr/src/key_expr/tests.rs | 3 +- .../src/keyexpr_tree/arc_tree.rs | 10 +- .../src/keyexpr_tree/box_tree.rs | 14 +- .../src/keyexpr_tree/impls/hashmap_impl.rs | 11 +- .../src/keyexpr_tree/impls/keyed_set_impl.rs | 3 +- .../src/keyexpr_tree/impls/mod.rs | 3 +- .../src/keyexpr_tree/impls/vec_set_impl.rs | 1 + .../src/keyexpr_tree/iters/includer.rs | 3 +- .../src/keyexpr_tree/iters/inclusion.rs | 4 +- .../src/keyexpr_tree/iters/intersection.rs | 4 +- .../src/keyexpr_tree/iters/tree_iter.rs | 3 +- .../zenoh-keyexpr/src/keyexpr_tree/test.rs | 19 +- .../src/keyexpr_tree/traits/default_impls.rs | 1 + .../src/keyexpr_tree/traits/mod.rs | 3 +- .../zenoh-protocol/src/common/extension.rs | 1 + commons/zenoh-protocol/src/core/cowstr.rs | 6 +- commons/zenoh-protocol/src/core/encoding.rs | 1 + commons/zenoh-protocol/src/core/endpoint.rs | 4 +- commons/zenoh-protocol/src/core/locator.rs | 4 +- commons/zenoh-protocol/src/core/mod.rs | 2 +- commons/zenoh-protocol/src/core/properties.rs | 3 +- commons/zenoh-protocol/src/core/resolution.rs | 4 +- commons/zenoh-protocol/src/core/whatami.rs | 3 +- commons/zenoh-protocol/src/core/wire_expr.rs | 1 + commons/zenoh-protocol/src/network/declare.rs | 20 +- .../zenoh-protocol/src/network/interest.rs | 3 +- commons/zenoh-protocol/src/network/mod.rs | 3 +- commons/zenoh-protocol/src/network/request.rs | 6 +- commons/zenoh-protocol/src/scouting/hello.rs | 3 +- 
.../zenoh-protocol/src/transport/fragment.rs | 3 +- commons/zenoh-protocol/src/transport/frame.rs | 3 +- commons/zenoh-protocol/src/transport/init.rs | 9 +- commons/zenoh-protocol/src/transport/join.rs | 9 +- commons/zenoh-protocol/src/transport/open.rs | 17 +- commons/zenoh-protocol/src/zenoh/del.rs | 7 +- commons/zenoh-protocol/src/zenoh/err.rs | 7 +- commons/zenoh-protocol/src/zenoh/mod.rs | 3 +- commons/zenoh-protocol/src/zenoh/put.rs | 7 +- commons/zenoh-protocol/src/zenoh/query.rs | 6 +- commons/zenoh-protocol/src/zenoh/reply.rs | 3 +- commons/zenoh-result/src/lib.rs | 3 +- commons/zenoh-runtime/src/lib.rs | 5 +- .../src/api/client/shared_memory_client.rs | 7 +- .../src/api/client/shared_memory_segment.rs | 4 +- .../zenoh-shm/src/api/client_storage/mod.rs | 21 +- .../posix/posix_shared_memory_client.rs | 3 +- .../posix_shared_memory_provider_backend.rs | 3 +- .../posix/posix_shared_memory_segment.rs | 11 +- .../api/provider/shared_memory_provider.rs | 11 +- commons/zenoh-shm/src/api/provider/types.rs | 3 +- commons/zenoh-shm/src/api/slice/zsliceshm.rs | 3 +- .../zenoh-shm/src/api/slice/zsliceshmmut.rs | 3 +- commons/zenoh-shm/src/header/segment.rs | 3 +- commons/zenoh-shm/src/lib.rs | 5 +- commons/zenoh-shm/src/watchdog/segment.rs | 3 +- commons/zenoh-sync/src/condition.rs | 3 +- commons/zenoh-sync/src/fifo_queue.rs | 3 +- commons/zenoh-sync/src/lib.rs | 9 +- commons/zenoh-sync/src/lifo_queue.rs | 1 + commons/zenoh-sync/src/mvar.rs | 8 +- commons/zenoh-sync/src/object_pool.rs | 4 +- commons/zenoh-sync/src/signal.rs | 10 +- commons/zenoh-task/src/lib.rs | 7 +- commons/zenoh-util/src/std_only/ffi/win.rs | 8 +- commons/zenoh-util/src/std_only/lib_loader.rs | 11 +- commons/zenoh-util/src/std_only/net/mod.rs | 16 +- commons/zenoh-util/src/std_only/time_range.rs | 3 +- commons/zenoh-util/src/std_only/timer.rs | 33 +-- examples/examples/z_get.rs | 3 +- examples/examples/z_get_liveliness.rs | 3 +- examples/examples/z_ping.rs | 3 +- examples/examples/z_ping_shm.rs | 3 
+- examples/examples/z_pub.rs | 3 +- examples/examples/z_pub_thr.rs | 3 +- examples/examples/z_pull.rs | 3 +- examples/examples/z_storage.rs | 3 +- examples/examples/z_sub_shm.rs | 3 +- examples/examples/z_sub_thr.rs | 3 +- io/zenoh-link-commons/src/lib.rs | 6 +- io/zenoh-link-commons/src/listener.rs | 10 +- io/zenoh-link-commons/src/multicast.rs | 3 +- io/zenoh-link-commons/src/tls.rs | 1 + io/zenoh-link-commons/src/unicast.rs | 3 +- io/zenoh-link/src/lib.rs | 65 +++--- io/zenoh-links/zenoh-link-quic/src/lib.rs | 1 - io/zenoh-links/zenoh-link-quic/src/unicast.rs | 27 ++- io/zenoh-links/zenoh-link-quic/src/utils.rs | 35 +-- io/zenoh-links/zenoh-link-quic/src/verify.rs | 4 +- io/zenoh-links/zenoh-link-serial/src/lib.rs | 9 +- .../zenoh-link-serial/src/unicast.rs | 35 +-- io/zenoh-links/zenoh-link-tcp/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 20 +- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 30 +-- io/zenoh-links/zenoh-link-tls/src/utils.rs | 28 +-- io/zenoh-links/zenoh-link-udp/src/lib.rs | 9 +- .../zenoh-link-udp/src/multicast.rs | 21 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 29 +-- .../zenoh-link-unixpipe/src/unix/unicast.rs | 44 ++-- .../zenoh-link-unixsock_stream/src/lib.rs | 6 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 35 +-- .../zenoh-link-vsock/src/unicast.rs | 23 +- io/zenoh-links/zenoh-link-ws/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 39 ++-- io/zenoh-transport/src/common/batch.rs | 4 +- .../src/common/defragmentation.rs | 3 +- io/zenoh-transport/src/common/pipeline.rs | 62 +++--- io/zenoh-transport/src/common/priority.rs | 8 +- io/zenoh-transport/src/common/stats.rs | 3 +- io/zenoh-transport/src/lib.rs | 13 +- io/zenoh-transport/src/manager.rs | 24 ++- .../src/multicast/establishment.rs | 20 +- io/zenoh-transport/src/multicast/link.rs | 28 +-- io/zenoh-transport/src/multicast/manager.rs | 12 +- io/zenoh-transport/src/multicast/mod.rs | 16 +- io/zenoh-transport/src/multicast/rx.rs | 12 +- 
io/zenoh-transport/src/multicast/transport.rs | 34 +-- io/zenoh-transport/src/multicast/tx.rs | 2 +- io/zenoh-transport/src/shm.rs | 1 + .../src/unicast/establishment/accept.rs | 35 +-- .../src/unicast/establishment/cookie.rs | 7 +- .../src/unicast/establishment/ext/auth/mod.rs | 12 +- .../unicast/establishment/ext/auth/pubkey.rs | 9 +- .../unicast/establishment/ext/auth/usrpwd.rs | 13 +- .../unicast/establishment/ext/compression.rs | 6 +- .../unicast/establishment/ext/lowlatency.rs | 6 +- .../unicast/establishment/ext/multilink.rs | 9 +- .../src/unicast/establishment/ext/qos.rs | 6 +- .../src/unicast/establishment/ext/shm.rs | 6 +- .../src/unicast/establishment/mod.rs | 3 +- .../src/unicast/establishment/open.rs | 32 +-- io/zenoh-transport/src/unicast/link.rs | 7 +- .../src/unicast/lowlatency/link.rs | 17 +- .../src/unicast/lowlatency/rx.rs | 3 +- .../src/unicast/lowlatency/transport.rs | 34 +-- .../src/unicast/lowlatency/tx.rs | 2 +- io/zenoh-transport/src/unicast/manager.rs | 34 +-- io/zenoh-transport/src/unicast/mod.rs | 18 +- .../src/unicast/test_helpers.rs | 3 +- .../src/unicast/transport_unicast_inner.rs | 11 +- .../src/unicast/universal/link.rs | 18 +- .../src/unicast/universal/rx.rs | 20 +- .../src/unicast/universal/transport.rs | 30 +-- .../src/unicast/universal/tx.rs | 2 +- io/zenoh-transport/tests/endpoints.rs | 1 + .../tests/multicast_compression.rs | 1 + .../tests/multicast_transport.rs | 1 + .../tests/transport_whitelist.rs | 1 + .../tests/unicast_authenticator.rs | 9 +- .../tests/unicast_compression.rs | 3 +- .../tests/unicast_concurrent.rs | 15 +- .../tests/unicast_defragmentation.rs | 1 + .../tests/unicast_intermittent.rs | 17 +- io/zenoh-transport/tests/unicast_multilink.rs | 1 + io/zenoh-transport/tests/unicast_openclose.rs | 2 +- .../tests/unicast_priorities.rs | 20 +- io/zenoh-transport/tests/unicast_shm.rs | 1 + .../tests/unicast_simultaneous.rs | 15 +- io/zenoh-transport/tests/unicast_time.rs | 1 + 
io/zenoh-transport/tests/unicast_transport.rs | 5 +- plugins/zenoh-backend-example/src/lib.rs | 3 +- plugins/zenoh-backend-traits/src/config.rs | 9 +- plugins/zenoh-backend-traits/src/lib.rs | 10 +- plugins/zenoh-plugin-example/src/lib.rs | 29 +-- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 13 +- plugins/zenoh-plugin-rest/src/config.rs | 10 +- plugins/zenoh-plugin-rest/src/lib.rs | 34 ++- .../src/backends_mgt.rs | 12 +- .../zenoh-plugin-storage-manager/src/lib.rs | 46 ++-- .../src/memory_backend/mod.rs | 15 +- .../src/replica/align_queryable.rs | 15 +- .../src/replica/aligner.rs | 11 +- .../src/replica/digest.rs | 16 +- .../src/replica/mod.rs | 22 +- .../src/replica/snapshotter.rs | 24 ++- .../src/replica/storage.rs | 56 ++--- .../src/storages_mgt.rs | 3 +- .../tests/operations.rs | 6 +- .../tests/wildcard.rs | 6 +- plugins/zenoh-plugin-trait/src/manager.rs | 2 +- .../src/manager/dynamic_plugin.rs | 3 +- .../src/manager/static_plugin.rs | 4 +- plugins/zenoh-plugin-trait/src/plugin.rs | 6 +- zenoh-ext/examples/examples/z_member.rs | 4 +- zenoh-ext/examples/examples/z_pub_cache.rs | 9 +- zenoh-ext/examples/examples/z_query_sub.rs | 6 +- zenoh-ext/examples/examples/z_view_size.rs | 4 +- zenoh-ext/src/group.rs | 22 +- zenoh-ext/src/lib.rs | 7 +- zenoh-ext/src/publication_cache.rs | 33 +-- zenoh-ext/src/querying_subscriber.rs | 40 ++-- zenoh-ext/src/session_ext.rs | 7 +- zenoh-ext/src/subscriber_ext.rs | 16 +- zenoh/src/api/admin.rs | 19 +- zenoh/src/api/builders/publication.rs | 31 +-- zenoh/src/api/builders/sample.rs | 23 +- zenoh/src/api/bytes.rs | 11 +- zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/handlers/ring.rs | 7 +- zenoh/src/api/info.rs | 4 +- zenoh/src/api/key_expr.rs | 9 +- zenoh/src/api/liveliness.rs | 19 +- zenoh/src/api/loader.rs | 5 +- zenoh/src/api/plugins.rs | 5 +- zenoh/src/api/publication.rs | 39 ++-- zenoh/src/api/query.rs | 22 +- zenoh/src/api/queryable.rs | 34 +-- zenoh/src/api/sample.rs | 15 +- zenoh/src/api/scouting.rs | 18 +- 
zenoh/src/api/selector.rs | 4 +- zenoh/src/api/session.rs | 54 ++--- zenoh/src/api/subscriber.rs | 22 +- zenoh/src/api/time.rs | 1 + zenoh/src/lib.rs | 199 ++++++++---------- zenoh/src/net/codec/linkstate.rs | 12 +- zenoh/src/net/primitives/demux.rs | 13 +- zenoh/src/net/primitives/mux.rs | 14 +- zenoh/src/net/routing/dispatcher/face.rs | 28 ++- zenoh/src/net/routing/dispatcher/pubsub.rs | 25 ++- zenoh/src/net/routing/dispatcher/queries.rs | 22 +- zenoh/src/net/routing/dispatcher/resource.rs | 26 ++- zenoh/src/net/routing/dispatcher/tables.rs | 37 ++-- zenoh/src/net/routing/hat/client/mod.rs | 37 ++-- zenoh/src/net/routing/hat/client/pubsub.rs | 37 ++-- zenoh/src/net/routing/hat/client/queries.rs | 44 ++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 48 ++--- .../net/routing/hat/linkstate_peer/network.rs | 36 ++-- .../net/routing/hat/linkstate_peer/pubsub.rs | 43 ++-- .../net/routing/hat/linkstate_peer/queries.rs | 50 +++-- zenoh/src/net/routing/hat/mod.rs | 18 +- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 28 ++- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 49 +++-- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 37 ++-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 44 ++-- zenoh/src/net/routing/hat/router/mod.rs | 56 ++--- zenoh/src/net/routing/hat/router/network.rs | 35 +-- zenoh/src/net/routing/hat/router/pubsub.rs | 43 ++-- zenoh/src/net/routing/hat/router/queries.rs | 50 +++-- .../net/routing/interceptor/access_control.rs | 16 +- .../net/routing/interceptor/authorization.rs | 9 +- .../net/routing/interceptor/downsampling.rs | 16 +- zenoh/src/net/routing/interceptor/mod.rs | 5 +- zenoh/src/net/routing/mod.rs | 10 +- zenoh/src/net/routing/router.rs | 43 ++-- zenoh/src/net/runtime/adminspace.rs | 48 +++-- zenoh/src/net/runtime/mod.rs | 43 ++-- zenoh/src/net/runtime/orchestrator.rs | 16 +- zenoh/src/net/tests/tables.rs | 34 +-- zenoh/src/prelude.rs | 68 +++--- zenoh/tests/acl.rs | 7 +- zenoh/tests/attachments.rs | 3 +- zenoh/tests/events.rs | 4 +- 
zenoh/tests/handler.rs | 1 + zenoh/tests/interceptors.rs | 4 +- zenoh/tests/liveliness.rs | 4 +- zenoh/tests/qos.rs | 4 +- zenoh/tests/routing.rs | 21 +- zenoh/tests/session.rs | 14 +- zenoh/tests/shm.rs | 14 +- zenoh/tests/unicity.rs | 14 +- zenohd/src/main.rs | 14 +- 320 files changed, 2384 insertions(+), 1882 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 084a3b148f..cb57db3abe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,7 +47,7 @@ jobs: uses: Swatinem/rust-cache@v2 - name: Code format check - run: cargo fmt --check + run: cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" - name: Clippy run: cargo +stable clippy --all-targets -- --deny warnings diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 9452e0da86..bb245d4747 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -53,7 +53,7 @@ jobs: run: rustup component add rustfmt clippy - name: Code format check - run: cargo fmt --check + run: cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index 687961aa5e..72491ae704 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -11,6 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; +use alloc::sync::Arc; +use core::{fmt, num::NonZeroUsize, option}; + use crate::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, @@ -18,11 +23,6 @@ use crate::{ writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, }; -use alloc::sync::Arc; -use core::{fmt, num::NonZeroUsize, option}; - -#[cfg(not(feature = "std"))] -use alloc::boxed::Box; #[derive(Clone, PartialEq, 
Eq)] pub struct BBuf { @@ -199,6 +199,7 @@ impl BBuf { pub fn rand(len: usize) -> Self { #[cfg(not(feature = "std"))] use alloc::vec::Vec; + use rand::Rng; let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 117fb412b7..da0cdd4030 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -113,9 +113,10 @@ pub mod buffer { } pub mod writer { - use crate::ZSlice; use core::num::NonZeroUsize; + use crate::ZSlice; + #[derive(Debug, Clone, Copy)] pub struct DidntWrite; @@ -156,9 +157,10 @@ pub mod writer { } pub mod reader { - use crate::ZSlice; use core::num::NonZeroUsize; + use crate::ZSlice; + #[derive(Debug, Clone, Copy)] pub struct DidntRead; diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index a652c6930e..f26e37a2aa 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -11,12 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, - writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, -}; use core::{ marker::PhantomData, mem, @@ -25,6 +19,13 @@ use core::{ slice::{self}, }; +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, + writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, + ZSlice, +}; + // Buffer impl Buffer for &[u8] { #[inline(always)] diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index bc2edf87bb..9d63880aea 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -11,15 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use core::{mem, num::NonZeroUsize, option}; + use crate::{ buffer::{Buffer, 
SplitBuffer}, reader::HasReader, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, }; -use core::{mem, num::NonZeroUsize, option}; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; /// Allocate a vector with a given capacity and sets the length to that capacity. #[must_use] diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 616dbb1b96..50eb54c923 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -11,6 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // +use alloc::{sync::Arc, vec::Vec}; +use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; +#[cfg(feature = "std")] +use std::io; + +use zenoh_collections::SingleOrVec; + #[cfg(feature = "shared-memory")] use crate::ZSliceKind; use crate::{ @@ -22,11 +29,6 @@ use crate::{ writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, ZSliceBuffer, }; -use alloc::{sync::Arc, vec::Vec}; -use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; -#[cfg(feature = "std")] -use std::io; -use zenoh_collections::SingleOrVec; fn get_mut_unchecked(arc: &mut Arc) -> &mut T { unsafe { &mut (*(Arc::as_ptr(arc) as *mut T)) } @@ -776,9 +778,10 @@ mod tests { #[cfg(feature = "std")] #[test] fn zbuf_seek() { + use std::io::Seek; + use super::{HasReader, ZBuf}; use crate::reader::Reader; - use std::io::Seek; let mut buf = ZBuf::empty(); buf.push_zslice([0u8, 1u8, 2u8, 3u8].into()); diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 60dbdab5e1..c169fcd4c0 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -11,10 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, HasReader, Reader}, -}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use core::{ any::Any, @@ -25,6 +21,11 @@ use core::{ option, }; +use crate::{ + buffer::{Buffer, 
SplitBuffer}, + reader::{BacktrackableReader, DidntRead, HasReader, Reader}, +}; + /*************************************/ /* ZSLICE BUFFER */ /*************************************/ diff --git a/commons/zenoh-buffers/tests/readwrite.rs b/commons/zenoh-buffers/tests/readwrite.rs index ea48218a85..cdfc8fea05 100644 --- a/commons/zenoh-buffers/tests/readwrite.rs +++ b/commons/zenoh-buffers/tests/readwrite.rs @@ -14,8 +14,8 @@ use zenoh_buffers::{ reader::{HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, HasWriter, Writer}, + BBuf, ZBuf, ZSlice, }; -use zenoh_buffers::{BBuf, ZBuf, ZSlice}; const BYTES: usize = 18; diff --git a/commons/zenoh-codec/src/common/extension.rs b/commons/zenoh-codec/src/common/extension.rs index 6c22f8ff01..21d716a769 100644 --- a/commons/zenoh-codec/src/common/extension.rs +++ b/commons/zenoh-codec/src/common/extension.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_protocol::common::{ iext, imsg::has_flag, ZExtBody, ZExtUnit, ZExtUnknown, ZExtZ64, ZExtZBuf, ZExtZBufHeader, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; + fn read_inner(reader: &mut R, _s: &str, header: u8) -> Result<(ZExtUnknown, bool), DidntRead> where R: Reader, diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index c8033cdd5f..abe33f6ab8 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -21,6 +20,8 @@ use zenoh_protocol::{ core::encoding::{flag, Encoding, EncodingId}, }; +use crate::{LCodec, RCodec, 
WCodec, Zenoh080, Zenoh080Bounded}; + impl LCodec<&Encoding> for Zenoh080 { fn w_len(self, x: &Encoding) -> usize { let mut len = self.w_len((x.id as u32) << 1); diff --git a/commons/zenoh-codec/src/core/locator.rs b/commons/zenoh-codec/src/core/locator.rs index 0bbd28a189..464b1bbb05 100644 --- a/commons/zenoh-codec/src/core/locator.rs +++ b/commons/zenoh-codec/src/core/locator.rs @@ -11,15 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use alloc::{string::String, vec::Vec}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::Locator; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + impl WCodec<&Locator, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/core/mod.rs b/commons/zenoh-codec/src/core/mod.rs index c8e19f057f..8230cdd9ac 100644 --- a/commons/zenoh-codec/src/core/mod.rs +++ b/commons/zenoh-codec/src/core/mod.rs @@ -22,13 +22,15 @@ mod zenohid; mod zint; mod zslice; -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use alloc::{string::String, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // [u8; N] macro_rules! 
array_impl { ($n:expr) => { diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 2548e4ed14..e25496a268 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -21,6 +20,8 @@ use zenoh_shm::{ watchdog::descriptor::Descriptor, SharedMemoryBufInfo, }; +use crate::{RCodec, WCodec, Zenoh080}; + impl WCodec<&Descriptor, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/core/timestamp.rs b/commons/zenoh-codec/src/core/timestamp.rs index 4891643192..025f8f8bf5 100644 --- a/commons/zenoh-codec/src/core/timestamp.rs +++ b/commons/zenoh-codec/src/core/timestamp.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::{Timestamp, ZenohId}; +use crate::{LCodec, RCodec, WCodec, Zenoh080}; + impl LCodec<&Timestamp> for Zenoh080 { fn w_len(self, x: &Timestamp) -> usize { self.w_len(x.get_time().as_u64()) + self.w_len(x.get_id().size()) diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index aa6f77b379..d5b91f75ed 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::Zenoh080Bounded, RCodec, WCodec, Zenoh080, Zenoh080Condition}; use alloc::string::String; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_protocol::{ network::Mapping, }; +use crate::{core::Zenoh080Bounded, RCodec, WCodec, Zenoh080, Zenoh080Condition}; + impl WCodec<&WireExpr<'_>, &mut W> for Zenoh080 
where W: Writer, diff --git a/commons/zenoh-codec/src/core/zbuf.rs b/commons/zenoh-codec/src/core/zbuf.rs index 137030e66c..8b8ead6ca0 100644 --- a/commons/zenoh-codec/src/core/zbuf.rs +++ b/commons/zenoh-codec/src/core/zbuf.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, Reader}, @@ -19,6 +18,8 @@ use zenoh_buffers::{ ZBuf, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // ZBuf bounded macro_rules! zbuf_impl { ($bound:ty) => { @@ -100,9 +101,10 @@ impl LCodec<&ZBuf> for Zenoh080 { // ZBuf sliced #[cfg(feature = "shared-memory")] mod shm { + use zenoh_buffers::{ZSlice, ZSliceKind}; + use super::*; use crate::Zenoh080Sliced; - use zenoh_buffers::{ZSlice, ZSliceKind}; const RAW: u8 = 0; const SHM_PTR: u8 = 1; diff --git a/commons/zenoh-codec/src/core/zenohid.rs b/commons/zenoh-codec/src/core/zenohid.rs index 6c53d4e63f..5098cad534 100644 --- a/commons/zenoh-codec/src/core/zenohid.rs +++ b/commons/zenoh-codec/src/core/zenohid.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::ZenohId; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; + impl LCodec<&ZenohId> for Zenoh080 { fn w_len(self, x: &ZenohId) -> usize { x.size() diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index d5160e2ee6..a29f88f3d5 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -11,12 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; +use crate::{LCodec, RCodec, WCodec, 
Zenoh080, Zenoh080Bounded}; + const VLE_LEN_MAX: usize = vle_len(u64::MAX); const fn vle_len(x: u64) -> usize { diff --git a/commons/zenoh-codec/src/core/zslice.rs b/commons/zenoh-codec/src/core/zslice.rs index cea0961b51..fe907ed273 100644 --- a/commons/zenoh-codec/src/core/zslice.rs +++ b/commons/zenoh-codec/src/core/zslice.rs @@ -11,13 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, ZSlice, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // ZSlice - Bounded macro_rules! zslice_impl { ($bound:ty) => { diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index ed3d019950..faffb04952 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use alloc::string::String; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + // Declaration impl WCodec<&DeclareBody, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs index 852e106f98..2deda7748a 100644 --- a/commons/zenoh-codec/src/network/interest.rs +++ b/commons/zenoh-codec/src/network/interest.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +28,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, 
Zenoh080Header}; + // Interest impl WCodec<&Interest, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index 5ebdb17b8e..fe9d254ee8 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -18,9 +18,6 @@ mod push; mod request; mod response; -use crate::{ - LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length, Zenoh080Reliability, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -31,6 +28,10 @@ use zenoh_protocol::{ network::{ext::EntityGlobalIdType, *}, }; +use crate::{ + LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length, Zenoh080Reliability, +}; + // NetworkMessage impl WCodec<&NetworkMessage, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index 9751e9952d..172b3f1058 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Oam, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index b9ec2ba5db..2c2e11a718 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +26,8 @@ use zenoh_protocol::{ zenoh::PushBody, }; +use crate::{common::extension, RCodec, WCodec, 
Zenoh080, Zenoh080Condition, Zenoh080Header}; + impl WCodec<&Push, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 6173840d7e..21f42709c4 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ zenoh::RequestBody, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, +}; + // Target impl WCodec<(&ext::TargetType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index 5b69e8b109..d94316de8e 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ b/commons/zenoh-codec/src/network/response.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ zenoh::ResponseBody, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, +}; + // Response impl WCodec<&Response, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/scouting/hello.rs b/commons/zenoh-codec/src/scouting/hello.rs index 430201133e..c3aff83667 100644 --- a/commons/zenoh-codec/src/scouting/hello.rs +++ b/commons/zenoh-codec/src/scouting/hello.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use alloc::{vec, vec::Vec}; 
+ use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::{ }, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl WCodec<&Hello, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/scouting/mod.rs b/commons/zenoh-codec/src/scouting/mod.rs index bbedce4282..d1f0b883a1 100644 --- a/commons/zenoh-codec/src/scouting/mod.rs +++ b/commons/zenoh-codec/src/scouting/mod.rs @@ -14,7 +14,6 @@ mod hello; mod scout; -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ scouting::{id, ScoutingBody, ScoutingMessage}, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&ScoutingMessage, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/scouting/scout.rs b/commons/zenoh-codec/src/scouting/scout.rs index 02d5294047..888ce2954f 100644 --- a/commons/zenoh-codec/src/scouting/scout.rs +++ b/commons/zenoh-codec/src/scouting/scout.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::{ }, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl WCodec<&Scout, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/batch.rs b/commons/zenoh-codec/src/transport/batch.rs index 525336d6e8..a08e796358 100644 --- a/commons/zenoh-codec/src/transport/batch.rs +++ b/commons/zenoh-codec/src/transport/batch.rs @@ -11,17 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080}; use core::num::NonZeroUsize; -use zenoh_buffers::reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}; -use 
zenoh_buffers::writer::{BacktrackableWriter, DidntWrite, Writer}; -use zenoh_buffers::ZBufReader; -use zenoh_protocol::core::Reliability; -use zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::transport::{ - Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, + +use zenoh_buffers::{ + reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}, + writer::{BacktrackableWriter, DidntWrite, Writer}, + ZBufReader, +}; +use zenoh_protocol::{ + core::Reliability, + network::NetworkMessage, + transport::{ + Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, + }, }; +use crate::{RCodec, WCodec, Zenoh080}; + #[derive(Clone, Copy, Debug)] #[repr(u8)] pub enum CurrentFrame { diff --git a/commons/zenoh-codec/src/transport/close.rs b/commons/zenoh-codec/src/transport/close.rs index 9771b9e1e9..62d9e542b7 100644 --- a/commons/zenoh-codec/src/transport/close.rs +++ b/commons/zenoh-codec/src/transport/close.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Close, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index b01e2c2bae..fc30abce9d 100644 --- a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, 
Zenoh080Header}; + // FragmentHeader impl WCodec<&FragmentHeader, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index ab82a024c4..6db4e70652 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Reliability}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Reliability}; + // FrameHeader impl WCodec<&FrameHeader, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index fec9f07afd..55e129799c 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header, Zenoh080Length, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ }, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header, Zenoh080Length, +}; + // InitSyn impl WCodec<&InitSyn, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index d87ceecc78..896d7f6290 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use alloc::boxed::Box; use core::time::Duration; + use 
zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -28,6 +28,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl LCodec<&PrioritySn> for Zenoh080 { fn w_len(self, p: &PrioritySn) -> usize { let PrioritySn { diff --git a/commons/zenoh-codec/src/transport/keepalive.rs b/commons/zenoh-codec/src/transport/keepalive.rs index aa6726f50b..44ef4c676a 100644 --- a/commons/zenoh-codec/src/transport/keepalive.rs +++ b/commons/zenoh-codec/src/transport/keepalive.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&KeepAlive, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/mod.rs b/commons/zenoh-codec/src/transport/mod.rs index 559b5b5fda..3adae0fb72 100644 --- a/commons/zenoh-codec/src/transport/mod.rs +++ b/commons/zenoh-codec/src/transport/mod.rs @@ -21,7 +21,6 @@ mod keepalive; mod oam; mod open; -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -32,6 +31,8 @@ use zenoh_protocol::{ transport::*, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; + // TransportMessageLowLatency impl WCodec<&TransportMessageLowLatency, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index 6861f638d3..156a0ce1ff 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use 
zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Oam, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index d539526715..712fe5ca95 100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use core::time::Duration; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + // OpenSyn impl WCodec<&OpenSyn, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/zenoh/del.rs b/commons/zenoh-codec/src/zenoh/del.rs index 3d0a64f428..07df1affc7 100644 --- a/commons/zenoh-codec/src/zenoh/del.rs +++ b/commons/zenoh-codec/src/zenoh/del.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +25,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Del, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index 5291645bf0..e19b11f70d 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, 
writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; + impl WCodec<&Err, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index dc38e5ee84..aeb8f53102 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -17,11 +17,6 @@ pub mod put; pub mod query; pub mod reply; -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -35,6 +30,12 @@ use zenoh_protocol::{ zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + // Push impl WCodec<&PushBody, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index 776b47245f..c10a98f6d8 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -31,6 +27,12 @@ use zenoh_protocol::{ }, }; +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; +use crate::{common::extension, RCodec, WCodec, Zenoh080, 
Zenoh080Header}; + impl WCodec<&Put, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index efac7b5671..c9b1cc196e 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -11,13 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::{string::String, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; - use zenoh_protocol::{ common::{iext, imsg}, zenoh::{ @@ -26,6 +25,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + // Consolidation impl WCodec for Zenoh080 where diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index 308004a1c2..a8d6a2afdc 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Reply, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index e9b8140f21..c26b681336 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::convert::TryFrom; + use rand::{ distributions::{Alphanumeric, DistString}, *, }; -use std::convert::TryFrom; use zenoh_buffers::{ reader::{HasReader, Reader}, writer::HasWriter, @@ -360,9 +361,10 @@ fn codec_encoding() { #[cfg(feature = "shared-memory")] #[test] fn codec_shm_info() { - use 
zenoh_shm::api::provider::chunk::ChunkDescriptor; - use zenoh_shm::header::descriptor::HeaderDescriptor; - use zenoh_shm::{watchdog::descriptor::Descriptor, SharedMemoryBufInfo}; + use zenoh_shm::{ + api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, + watchdog::descriptor::Descriptor, SharedMemoryBufInfo, + }; run!(SharedMemoryBufInfo, { let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index ed82bf49af..7b2391197d 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -13,6 +13,8 @@ // use alloc::vec; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use core::{ cmp::PartialEq, fmt, iter, @@ -20,9 +22,6 @@ use core::{ ptr, slice, }; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - #[derive(Clone, Eq)] enum SingleOrVecInner { Single(T), diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index a845fbfe6a..e5f88a05f3 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -12,18 +12,18 @@ // ZettaScale Zenoh Team, // +use serde::{Deserialize, Serialize}; +use zenoh_core::zparse_default; +use zenoh_protocol::core::WhatAmI; + use crate::{ defaults::{ self, DEFAULT_CONNECT_EXIT_ON_FAIL, DEFAULT_CONNECT_TIMEOUT_MS, DEFAULT_LISTEN_EXIT_ON_FAIL, DEFAULT_LISTEN_TIMEOUT_MS, }, + mode_dependent::*, Config, }; -use serde::{Deserialize, Serialize}; -use zenoh_core::zparse_default; -use zenoh_protocol::core::WhatAmI; - -use crate::mode_dependent::*; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct ConnectionRetryModeDependentConf { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 26f7cfefaa..c55480b2c5 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -16,10 +16,6 @@ pub mod defaults; mod include; -use 
include::recursive_include; -use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; -use serde::{Deserialize, Serialize}; -use serde_json::{Map, Value}; #[allow(unused_imports)] use std::convert::TryFrom; // This is a false positive from the rust analyser use std::{ @@ -31,6 +27,11 @@ use std::{ path::Path, sync::{Arc, Mutex, MutexGuard, Weak}, }; + +use include::recursive_include; +use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; +use serde::{Deserialize, Serialize}; +use serde_json::{Map, Value}; use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 9f6cc2c7e4..074dd823d9 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // +use std::{fmt, marker::PhantomData}; + use serde::{ de::{self, MapAccess, Visitor}, Deserialize, Serialize, }; -use std::fmt; -use std::marker::PhantomData; pub use zenoh_protocol::core::{ whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index 19cf3751ff..8d6fbfcc0a 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -27,8 +27,7 @@ pub use zenoh_result::{bail, to_zerror, zerror}; pub mod zresult { pub use zenoh_result::*; } -pub use zresult::Error; -pub use zresult::ZResult as Result; +pub use zresult::{Error, ZResult as Result}; /// A resolvable execution, either sync or async pub trait Resolvable { diff --git a/commons/zenoh-crypto/src/cipher.rs b/commons/zenoh-crypto/src/cipher.rs index 3d12712e56..aa78b97b46 100644 --- a/commons/zenoh-crypto/src/cipher.rs +++ b/commons/zenoh-crypto/src/cipher.rs @@ -11,12 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // 
-use super::PseudoRng; -use aes::cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt, KeyInit}; -use aes::Aes128; +use aes::{ + cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt, KeyInit}, + Aes128, +}; use rand::Rng; use zenoh_result::{bail, ZResult}; +use super::PseudoRng; + pub struct BlockCipher { inner: Aes128, } @@ -68,9 +71,10 @@ impl BlockCipher { mod tests { #[test] fn cipher() { - use super::{BlockCipher, PseudoRng}; use rand::{RngCore, SeedableRng}; + use super::{BlockCipher, PseudoRng}; + fn encrypt_decrypt(cipher: &BlockCipher, prng: &mut PseudoRng) { println!("\n[1]"); let t1 = "A".as_bytes().to_vec(); diff --git a/commons/zenoh-keyexpr/benches/keyexpr_tree.rs b/commons/zenoh-keyexpr/benches/keyexpr_tree.rs index 4047e3cf5c..7048521eda 100644 --- a/commons/zenoh-keyexpr/benches/keyexpr_tree.rs +++ b/commons/zenoh-keyexpr/benches/keyexpr_tree.rs @@ -18,12 +18,15 @@ use std::{ }; use rand::SeedableRng; -use zenoh_keyexpr::keyexpr_tree::{ - impls::{HashMapProvider, VecSetProvider}, - traits::*, - KeArcTree, KeBoxTree, +use zenoh_keyexpr::{ + fuzzer::KeyExprFuzzer, + keyexpr_tree::{ + impls::{HashMapProvider, VecSetProvider}, + traits::*, + KeArcTree, KeBoxTree, + }, + OwnedKeyExpr, }; -use zenoh_keyexpr::{fuzzer::KeyExprFuzzer, OwnedKeyExpr}; #[derive(Clone, Copy, Debug, Default)] pub struct Averager { diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 85b4ef79e2..fd87cef55f 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; use alloc::{ borrow::{Borrow, ToOwned}, format, @@ -24,8 +23,11 @@ use core::{ fmt, ops::{Deref, Div}, }; + use zenoh_result::{bail, Error as ZError, ZResult}; +use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; + /// A [`str`] newtype that is statically known 
to be a valid key expression. /// /// The exact key expression specification can be found [here](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md). Here are the major lines: diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index 00e79b0c08..cccccdfba3 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -11,12 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // +use alloc::string::String; +use core::{slice, str}; + use crate::key_expr::{ utils::{Split, Writer}, DELIMITER, DOUBLE_WILD, SINGLE_WILD, }; -use alloc::string::String; -use core::{slice, str}; pub trait Canonizable { fn canonize(&mut self); diff --git a/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs b/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs index 52f01c5b6a..a6329cdf73 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs @@ -230,8 +230,9 @@ fn do_parse<'a>( #[test] fn parsing() { - use crate::key_expr::OwnedKeyExpr; use core::convert::TryFrom; + + use crate::key_expr::OwnedKeyExpr; for a_spec in ["${a:*}", "a/${a:*}"] { for b_spec in ["b/${b:**}", "${b:**}"] { let specs = [a_spec, b_spec, "c"]; diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs index fa346a2d4a..77388a55c9 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs @@ -119,8 +119,7 @@ pub fn intersect(s1: &[u8], s2: &[u8]) -> bool { it_intersect::(s1, s2) } -use super::restiction::NoSubWilds; -use super::{Intersector, MayHaveVerbatim}; +use super::{restiction::NoSubWilds, Intersector, MayHaveVerbatim}; pub struct ClassicIntersector; impl Intersector, NoSubWilds<&[u8]>> for ClassicIntersector { diff --git 
a/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs index f5d7735d9e..06b990ee72 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs @@ -12,9 +12,8 @@ // ZettaScale Zenoh Team, // -use crate::DELIMITER; - use super::keyexpr; +use crate::DELIMITER; mod classical; pub use classical::ClassicIntersector; diff --git a/commons/zenoh-keyexpr/src/key_expr/owned.rs b/commons/zenoh-keyexpr/src/key_expr/owned.rs index 5164e4762c..a53fdec2f0 100644 --- a/commons/zenoh-keyexpr/src/key_expr/owned.rs +++ b/commons/zenoh-keyexpr/src/key_expr/owned.rs @@ -13,7 +13,6 @@ // extern crate alloc; -use super::{canon::Canonizable, keyexpr}; // use crate::core::WireExpr; use alloc::{borrow::ToOwned, boxed::Box, string::String, sync::Arc}; use core::{ @@ -23,6 +22,8 @@ use core::{ str::FromStr, }; +use super::{canon::Canonizable, keyexpr}; + /// A [`Arc`] newtype that is statically known to be a valid key expression. /// /// See [`keyexpr`](super::borrowed::keyexpr). diff --git a/commons/zenoh-keyexpr/src/key_expr/tests.rs b/commons/zenoh-keyexpr/src/key_expr/tests.rs index 6d9e64896e..c004666776 100644 --- a/commons/zenoh-keyexpr/src/key_expr/tests.rs +++ b/commons/zenoh-keyexpr/src/key_expr/tests.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::key_expr::{fuzzer, intersect::*, keyexpr}; use std::{convert::TryInto, fmt::Debug}; +use crate::key_expr::{fuzzer, intersect::*, keyexpr}; + type BoxedIntersectors = Vec Intersector<&'a keyexpr, &'a keyexpr> + Send + Sync>>; lazy_static::lazy_static! 
{ diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index a0428ac563..e800697bef 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -20,10 +20,11 @@ use core::fmt::Debug; use token_cell::prelude::*; -use super::box_tree::PruneResult; -use super::support::IterOrOption; -use crate::keyexpr; -use crate::keyexpr_tree::{support::IWildness, *}; +use super::{box_tree::PruneResult, support::IterOrOption}; +use crate::{ + keyexpr, + keyexpr_tree::{support::IWildness, *}, +}; pub struct KeArcTreeInner< Weight, @@ -428,6 +429,7 @@ where pub(crate) mod sealed { use alloc::sync::Arc; use core::ops::{Deref, DerefMut}; + use token_cell::prelude::{TokenCell, TokenTrait}; pub struct Tokenized(pub A, pub(crate) B); diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index 5aa23e78ac..fcf230731a 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -17,15 +17,15 @@ use alloc::boxed::Box; use alloc::string::String; use core::ptr::NonNull; -use crate::keyexpr; -use crate::keyexpr_tree::{ - support::{IWildness, NonWild, UnknownWildness}, - *, +use super::{impls::KeyedSetProvider, support::IterOrOption}; +use crate::{ + keyexpr, + keyexpr_tree::{ + support::{IWildness, NonWild, UnknownWildness}, + *, + }, }; -use super::impls::KeyedSetProvider; -use super::support::IterOrOption; - /// A fully owned KeTree. /// /// Note that most of `KeBoxTree`'s methods are declared in the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. 
diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs index 72f830a912..a5a16e1d82 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs @@ -17,17 +17,18 @@ use core::hash::Hasher; // `SipHasher` is deprecated in favour of a symbol that only exists in `std` #[allow(deprecated)] use core::hash::SipHasher as DefaultHasher; -#[cfg(not(feature = "std"))] -use hashbrown::{ - hash_map::{Entry, Iter, IterMut, Values, ValuesMut}, - HashMap, -}; #[cfg(feature = "std")] use std::collections::{ hash_map::{DefaultHasher, Entry, Iter, IterMut, Values, ValuesMut}, HashMap, }; +#[cfg(not(feature = "std"))] +use hashbrown::{ + hash_map::{Entry, Iter, IterMut, Values, ValuesMut}, + HashMap, +}; + use crate::keyexpr_tree::*; #[cfg_attr(not(feature = "std"), allow(deprecated))] diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs index 4fab65a850..a6b1847697 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs @@ -20,9 +20,10 @@ use core::hash::SipHasher as DefaultHasher; #[cfg(feature = "std")] use std::collections::hash_map::DefaultHasher; -use crate::keyexpr_tree::*; use keyed_set::{KeyExtractor, KeyedSet}; +use crate::keyexpr_tree::*; + #[cfg_attr(not(feature = "std"), allow(deprecated))] pub struct KeyedSetProvider( core::marker::PhantomData, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs index 2645c9d95b..48547429f3 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs @@ -12,10 +12,11 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr; pub use hashmap_impl::HashMapProvider; pub use 
keyed_set_impl::KeyedSetProvider; pub use vec_set_impl::VecSetProvider; + +use crate::keyexpr; mod hashmap_impl; mod keyed_set_impl; mod vec_set_impl; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs index 96877ebda6..510755e3c4 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs @@ -13,6 +13,7 @@ // use alloc::vec::Vec; + use zenoh_result::unlikely; use crate::keyexpr_tree::*; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs index a22d0804b1..bf09714f29 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs index 0ed2c96645..87e5af90a9 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs @@ -12,10 +12,12 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; + use zenoh_result::unlikely; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs index 34902810f0..dccd571911 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs @@ -12,10 
+12,12 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; + use zenoh_result::unlikely; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs index 666f0cb2c2..05afae3885 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs @@ -12,9 +12,8 @@ // ZettaScale Zenoh Team, // -use core::num::NonZeroUsize; - use alloc::vec::Vec; +use core::num::NonZeroUsize; use crate::keyexpr_tree::*; pub struct TreeIter<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs index fc2372a67b..ac3d15c6ec 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs @@ -12,24 +12,25 @@ // ZettaScale Zenoh Team, // -use crate::fuzzer::KeyExprFuzzer; use alloc::vec::Vec; -use rand::Rng; - -use super::{ - impls::{KeyedSetProvider, VecSetProvider}, - *, -}; use core::{ convert::{TryFrom, TryInto}, fmt::Debug, ops::Deref, }; -#[cfg(not(feature = "std"))] -use hashbrown::HashMap; #[cfg(feature = "std")] use std::collections::HashMap; +#[cfg(not(feature = "std"))] +use hashbrown::HashMap; +use rand::Rng; + +use super::{ + impls::{KeyedSetProvider, VecSetProvider}, + *, +}; +use crate::fuzzer::KeyExprFuzzer; + fn insert<'a, K: TryInto<&'a keyexpr>, V: Clone + PartialEq + Debug + 'static>( ketree: &mut KeBoxTree, map: &mut HashMap>, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs index e6def16608..6a043ccda0 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs +++ 
b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs @@ -13,6 +13,7 @@ // use alloc::{boxed::Box, sync::Arc}; + use token_cell::prelude::{TokenCell, TokenCellTrait, TokenTrait}; use super::*; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index dd06cf14b8..03a97f5063 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // -use crate::{keyexpr, OwnedKeyExpr}; use alloc::boxed::Box; + +use crate::{keyexpr, OwnedKeyExpr}; pub mod default_impls; /// The basic immutable methods of all KeTrees. diff --git a/commons/zenoh-protocol/src/common/extension.rs b/commons/zenoh-protocol/src/common/extension.rs index f61df61cc6..1d9ff41d50 100644 --- a/commons/zenoh-protocol/src/common/extension.rs +++ b/commons/zenoh-protocol/src/common/extension.rs @@ -15,6 +15,7 @@ use core::{ convert::TryFrom, fmt::{self, Debug}, }; + use zenoh_buffers::ZBuf; /// # Zenoh extensions diff --git a/commons/zenoh-protocol/src/core/cowstr.rs b/commons/zenoh-protocol/src/core/cowstr.rs index 209d020f40..b31c1c4a5d 100644 --- a/commons/zenoh-protocol/src/core/cowstr.rs +++ b/commons/zenoh-protocol/src/core/cowstr.rs @@ -12,8 +12,10 @@ // ZettaScale Zenoh Team, // use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; -use core::fmt::{Debug, Display, Formatter}; -use core::num::NonZeroUsize; +use core::{ + fmt::{Debug, Display, Formatter}, + num::NonZeroUsize, +}; enum CowStrInner<'a> { Borrowed(&'a str), diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index 70afdbf143..e58088b581 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use core::fmt::Debug; + use zenoh_buffers::ZSlice; pub type EncodingId = u16; diff --git 
a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index a61fdd8e89..8b2c4ad01c 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{locator::*, parameters::Parameters}; use alloc::{borrow::ToOwned, format, string::String}; use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; + use zenoh_result::{bail, zerror, Error as ZError, ZResult}; +use super::{locator::*, parameters::Parameters}; + // Parsing chars pub const PROTO_SEPARATOR: char = '/'; pub const METADATA_SEPARATOR: char = '?'; diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index 50b909b12f..14f899e7c6 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ b/commons/zenoh-protocol/src/core/locator.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::endpoint::*; use alloc::{borrow::ToOwned, string::String}; use core::{convert::TryFrom, fmt, hash::Hash, str::FromStr}; + use zenoh_result::{Error as ZError, ZResult}; +use super::endpoint::*; + /// A string that respects the [`Locator`] canon form: `/
[?]`. /// /// `` is of the form `=;...;=` where keys are alphabetically sorted. diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 0920d55d01..9f10cab391 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -23,6 +23,7 @@ use core::{ hash::Hash, str::FromStr, }; + pub use uhlc::{Timestamp, NTP64}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::{bail, zerror}; @@ -33,7 +34,6 @@ pub type TimestampId = uhlc::ID; /// Constants and helpers for zenoh `whatami` flags. pub mod whatami; pub use whatami::*; - pub use zenoh_keyexpr::key_expr; pub mod wire_expr; diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index a4c2c35197..5264288448 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::{ borrow::Cow, string::{String, ToString}, @@ -20,6 +19,8 @@ use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; +use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; + /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. 
diff --git a/commons/zenoh-protocol/src/core/resolution.rs b/commons/zenoh-protocol/src/core/resolution.rs index 093fd33bb4..fb16a5c713 100644 --- a/commons/zenoh-protocol/src/core/resolution.rs +++ b/commons/zenoh-protocol/src/core/resolution.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{network::RequestId, transport::TransportSn}; use alloc::string::String; use core::{fmt, str::FromStr}; + use zenoh_result::{bail, ZError}; +use crate::{network::RequestId, transport::TransportSn}; + #[repr(u8)] // The value represents the 2-bit encoded value #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] diff --git a/commons/zenoh-protocol/src/core/whatami.rs b/commons/zenoh-protocol/src/core/whatami.rs index 6aacb0d356..10c5b42c78 100644 --- a/commons/zenoh-protocol/src/core/whatami.rs +++ b/commons/zenoh-protocol/src/core/whatami.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // use alloc::string::String; -use const_format::formatcp; use core::{convert::TryFrom, fmt, num::NonZeroU8, ops::BitOr, str::FromStr}; + +use const_format::formatcp; use zenoh_result::{bail, ZError}; #[repr(u8)] diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index a66b1aa212..9f5c432665 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -18,6 +18,7 @@ use alloc::{ string::{String, ToString}, }; use core::{convert::TryInto, fmt, sync::atomic::AtomicU16}; + use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_result::{bail, ZResult}; diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 9a41f42e56..a5373cd5f4 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -11,19 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::{imsg, ZExtZ64, ZExtZBuf}, - core::{ExprId, Reliability, WireExpr}, - 
network::Mapping, - zextz64, zextzbuf, -}; use alloc::borrow::Cow; + pub use common::*; pub use keyexpr::*; pub use queryable::*; pub use subscriber::*; pub use token::*; +use crate::{ + common::{imsg, ZExtZ64, ZExtZBuf}, + core::{ExprId, Reliability, WireExpr}, + network::Mapping, + zextz64, zextzbuf, +}; + pub mod flag { pub const I: u8 = 1 << 5; // 0x20 Interest if I==1 then the declare is in a response to an Interest with future==false // pub const X: u8 = 1 << 6; // 0x40 Reserved @@ -288,9 +290,8 @@ pub mod keyexpr { } pub mod subscriber { - use crate::core::EntityId; - use super::*; + use crate::core::EntityId; pub type SubscriberId = EntityId; @@ -448,9 +449,8 @@ pub mod subscriber { } pub mod queryable { - use crate::core::EntityId; - use super::*; + use crate::core::EntityId; pub type QueryableId = EntityId; diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index e7eb75787e..46797b72ee 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ b/commons/zenoh-protocol/src/network/interest.rs @@ -11,13 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::imsg, core::WireExpr, network::Mapping}; use core::{ fmt::{self, Debug}, ops::{Add, AddAssign, Sub, SubAssign}, sync::atomic::AtomicU32, }; +use crate::{common::imsg, core::WireExpr, network::Mapping}; + pub type InterestId = u32; pub mod flag { diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 5a0635c9e0..952fe74e89 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -217,11 +217,12 @@ impl From for NetworkMessage { // Extensions pub mod ext { + use core::fmt; + use crate::{ common::{imsg, ZExtZ64}, core::{CongestionControl, EntityId, Priority, ZenohId}, }; - use core::fmt; /// ```text /// 7 6 5 4 3 2 1 0 diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs 
index ff978744e8..09e8e6b2b6 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::WireExpr, zenoh::RequestBody}; use core::sync::atomic::AtomicU32; +use crate::{core::WireExpr, zenoh::RequestBody}; + /// The resolution of a RequestId pub type RequestId = u32; pub type AtomicRequestId = AtomicU32; @@ -64,11 +65,12 @@ pub struct Request { } pub mod ext { + use core::{num::NonZeroU32, time::Duration}; + use crate::{ common::{ZExtZ64, ZExtZBuf}, zextz64, zextzbuf, }; - use core::{num::NonZeroU32, time::Duration}; pub type QoS = zextz64!(0x1, false); pub type QoSType = crate::network::ext::QoSType<{ QoS::ID }>; diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 562e2fb8c4..62ea915e5a 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -11,10 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::{Locator, WhatAmI, ZenohId}; use alloc::vec::Vec; use core::fmt; +use crate::core::{Locator, WhatAmI, ZenohId}; + /// # Hello message /// /// The [`Hello`] message is used to advertise the locators a zenoh node is reachable at. 
diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index 3e80c9cfbf..0a1df1fdf5 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +use zenoh_buffers::ZSlice; + use crate::core::Reliability; pub use crate::transport::TransportSn; -use zenoh_buffers::ZSlice; /// # Fragment message /// diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 184784f9f1..02a4ead48f 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; use alloc::vec::Vec; +use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; + /// # Frame message /// /// The [`Frame`] message is used to transmit one ore more complete serialized diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index 7e86d17af2..b1febac4b5 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use zenoh_buffers::ZSlice; + use crate::{ core::{Resolution, WhatAmI, ZenohId}, transport::BatchSize, }; -use zenoh_buffers::ZSlice; /// # Init message /// @@ -158,9 +159,10 @@ pub mod ext { impl InitSyn { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + use crate::common::{ZExtUnit, ZExtZBuf}; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); @@ -213,9 +215,10 @@ pub struct InitAck { impl InitAck { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + use 
crate::common::{ZExtUnit, ZExtZBuf}; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index a5cf1422a6..9918de6acf 100644 --- a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use core::time::Duration; + use crate::{ core::{Priority, Resolution, WhatAmI, ZenohId}, transport::{BatchSize, PrioritySn}, }; -use core::time::Duration; /// # Join message /// @@ -115,9 +116,10 @@ pub struct Join { // Extensions pub mod ext { + use alloc::boxed::Box; + use super::{Priority, PrioritySn}; use crate::{common::ZExtZBuf, zextzbuf}; - use alloc::boxed::Box; /// # QoS extension /// Used to announce next sn when QoS is enabled @@ -132,9 +134,10 @@ pub mod ext { impl Join { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::ZExtZBuf; use rand::Rng; + use crate::common::ZExtZBuf; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index c643286193..8c2e1429ec 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::transport::TransportSn; use core::time::Duration; + use zenoh_buffers::ZSlice; +use crate::transport::TransportSn; + /// # Open message /// /// After having succesfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, @@ -88,15 +90,14 @@ pub struct OpenSyn { // Extensions pub mod ext { - use crate::{ - common::{ZExtUnit, ZExtZBuf}, - zextunit, zextzbuf, - }; - #[cfg(feature = "shared-memory")] use crate::common::ZExtZ64; #[cfg(feature = "shared-memory")] use crate::zextz64; + use crate::{ + common::{ZExtUnit, ZExtZBuf}, + zextunit, zextzbuf, + }; /// # QoS 
extension /// Used to negotiate the use of QoS @@ -128,11 +129,11 @@ pub mod ext { impl OpenSyn { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; #[cfg(feature = "shared-memory")] use crate::common::ZExtZ64; + use crate::common::{ZExtUnit, ZExtZBuf}; const MIN: usize = 32; const MAX: usize = 1_024; @@ -186,11 +187,11 @@ pub struct OpenAck { impl OpenAck { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; #[cfg(feature = "shared-memory")] use crate::common::ZExtZ64; + use crate::common::{ZExtUnit, ZExtZBuf}; let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-protocol/src/zenoh/del.rs b/commons/zenoh-protocol/src/zenoh/del.rs index 84fec5bc08..4723cd5415 100644 --- a/commons/zenoh-protocol/src/zenoh/del.rs +++ b/commons/zenoh-protocol/src/zenoh/del.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; use alloc::vec::Vec; + use uhlc::Timestamp; +use crate::common::ZExtUnknown; + /// # Put message /// /// ```text @@ -62,8 +64,9 @@ pub mod ext { impl Del { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; use rand::Rng; + + use crate::{common::iext, core::ZenohId}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index b6aa5f4954..b8808d96d7 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; + use zenoh_buffers::ZBuf; +use crate::{common::ZExtUnknown, core::Encoding}; + /// # Err message /// /// ```text @@ -71,8 +73,9 @@ pub mod ext { impl Err { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::iext; use rand::Rng; + + use 
crate::common::iext; let mut rng = rand::thread_rng(); let encoding = Encoding::rand(); diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 7bca48f3ba..af9ba853f5 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -17,13 +17,14 @@ pub mod put; pub mod query; pub mod reply; -use crate::core::Encoding; pub use del::Del; pub use err::Err; pub use put::Put; pub use query::{Consolidation, Query}; pub use reply::Reply; +use crate::core::Encoding; + pub mod id { pub const OAM: u8 = 0x00; pub const PUT: u8 = 0x01; diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index 14674e9ad9..ef0a71db09 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; + use uhlc::Timestamp; use zenoh_buffers::ZBuf; +use crate::{common::ZExtUnknown, core::Encoding}; + /// # Put message /// /// ```text @@ -80,8 +82,9 @@ pub mod ext { impl Put { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; use rand::Rng; + + use crate::{common::iext, core::ZenohId}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index f1baaebe20..988447b835 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; use alloc::{string::String, vec::Vec}; +use crate::common::ZExtUnknown; + /// The kind of consolidation. 
#[repr(u8)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy)] @@ -108,11 +109,12 @@ pub mod ext { impl Query { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::iext; use rand::{ distributions::{Alphanumeric, DistString}, Rng, }; + + use crate::common::iext; let mut rng = rand::thread_rng(); const MIN: usize = 2; diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 7cbab4ca0a..f29521a4a9 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use alloc::vec::Vec; + use crate::{ common::ZExtUnknown, zenoh::{query::Consolidation, PushBody}, }; -use alloc::vec::Vec; /// # Reply message /// diff --git a/commons/zenoh-result/src/lib.rs b/commons/zenoh-result/src/lib.rs index 60148c763f..79de74f4eb 100644 --- a/commons/zenoh-result/src/lib.rs +++ b/commons/zenoh-result/src/lib.rs @@ -20,9 +20,10 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; -use anyhow::Error as AnyError; use core::fmt; +use anyhow::Error as AnyError; + #[cold] pub const fn cold() {} pub const fn likely(b: bool) -> bool { diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index dcd46744e6..9c5af8a107 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use core::panic; -use lazy_static::lazy_static; -use serde::Deserialize; use std::{ borrow::Borrow, collections::HashMap, @@ -26,6 +24,9 @@ use std::{ }, time::Duration, }; + +use lazy_static::lazy_static; +use serde::Deserialize; use tokio::runtime::{Handle, Runtime, RuntimeFlavor}; use zenoh_macros::{GenericRuntimeParam, RegisterParam}; use zenoh_result::ZResult as Result; diff --git a/commons/zenoh-shm/src/api/client/shared_memory_client.rs b/commons/zenoh-shm/src/api/client/shared_memory_client.rs index abc7221300..dd3cf5db12 100644 --- 
a/commons/zenoh-shm/src/api/client/shared_memory_client.rs +++ b/commons/zenoh-shm/src/api/client/shared_memory_client.rs @@ -12,15 +12,12 @@ // ZettaScale Zenoh Team, // -use std::fmt::Debug; - -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use zenoh_result::ZResult; -use crate::api::common::types::SegmentID; - use super::shared_memory_segment::SharedMemorySegment; +use crate::api::common::types::SegmentID; /// SharedMemoryClient - client factory implementation for particular shared memory protocol #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs index 88eaf8761f..e3aaf9ba39 100644 --- a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs +++ b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // -use std::fmt::Debug; - -use std::sync::atomic::AtomicPtr; +use std::{fmt::Debug, sync::atomic::AtomicPtr}; use zenoh_result::ZResult; diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs index 0ce1a8af11..7b78c23182 100644 --- a/commons/zenoh-shm/src/api/client_storage/mod.rs +++ b/commons/zenoh-shm/src/api/client_storage/mod.rs @@ -12,26 +12,27 @@ // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::HashMap, sync::{Arc, RwLock}, }; +use lazy_static::lazy_static; use zenoh_result::{bail, ZResult}; -use crate::api::{ - client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }, - common::types::ProtocolID, - protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, +use crate::{ + api::{ + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + common::types::ProtocolID, + protocol_implementations::posix::{ + 
posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, + }, }, + reader::{ClientStorage, GlobalDataSegmentID}, }; -use crate::reader::{ClientStorage, GlobalDataSegmentID}; - lazy_static! { /// A global lazily-initialized SHM client storage. /// When initialized, contains default client set, diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs index 0184f50036..5684b0b15f 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use zenoh_result::ZResult; +use super::posix_shared_memory_segment::PosixSharedMemorySegment; use crate::api::{ client::{ shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, @@ -23,8 +24,6 @@ use crate::api::{ common::types::SegmentID, }; -use super::posix_shared_memory_segment::PosixSharedMemorySegment; - /// Client factory implementation for particular shared memory protocol #[zenoh_macros::unstable_doc] #[derive(Debug)] diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs index 89c1b91387..60e2a10891 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs @@ -25,6 +25,7 @@ use std::{ use zenoh_core::zlock; use zenoh_result::ZResult; +use super::posix_shared_memory_segment::PosixSharedMemorySegment; use crate::api::{ common::types::ChunkID, provider::{ @@ -34,8 +35,6 @@ use crate::api::{ }, }; -use super::posix_shared_memory_segment::PosixSharedMemorySegment; - // 
TODO: MIN_FREE_CHUNK_SIZE limitation is made to reduce memory fragmentation and lower // the CPU time needed to defragment() - that's reasonable, and there is additional thing here: // our SHM\zerocopy functionality outperforms common buffer transmission only starting from 1K diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs index eb49d141ca..3f74594ad0 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs @@ -16,10 +16,13 @@ use std::sync::atomic::AtomicPtr; use zenoh_result::ZResult; -use crate::api::common::types::SegmentID; -use crate::api::{client::shared_memory_segment::SharedMemorySegment, common::types::ChunkID}; - -use crate::posix_shm::array::ArrayInSHM; +use crate::{ + api::{ + client::shared_memory_segment::SharedMemorySegment, + common::types::{ChunkID, SegmentID}, + }, + posix_shm::array::ArrayInSHM, +}; const POSIX_SHM_SEGMENT_PREFIX: &str = "posix_shm_provider_segment"; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index c3b8128300..58109a699d 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -22,6 +22,11 @@ use std::{ use async_trait::async_trait; use zenoh_result::ZResult; +use super::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, +}; use crate::{ api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, header::{ @@ -38,12 +43,6 @@ use crate::{ SharedMemoryBuf, SharedMemoryBufInfo, }; -use super::{ - chunk::{AllocatedChunk, 
ChunkDescriptor}, - shared_memory_provider_backend::SharedMemoryProviderBackend, - types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, -}; - #[derive(Debug)] struct BusyChunk { descriptor: ChunkDescriptor, diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index 662482f567..ddf949ee75 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -16,9 +16,8 @@ use std::fmt::Display; use zenoh_result::{bail, ZResult}; -use crate::api::slice::zsliceshmmut::ZSliceShmMut; - use super::chunk::AllocatedChunk; +use crate::api::slice::zsliceshmmut::ZSliceShmMut; /// Allocation errors /// diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/slice/zsliceshm.rs index 86f4395ebb..b2ba611b3c 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshm.rs +++ b/commons/zenoh-shm/src/api/slice/zsliceshm.rs @@ -20,9 +20,8 @@ use std::{ use zenoh_buffers::{ZBuf, ZSlice}; -use crate::SharedMemoryBuf; - use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; +use crate::SharedMemoryBuf; /// An immutable SHM slice #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs index 62823785da..d866e4173e 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs +++ b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs @@ -17,12 +17,11 @@ use std::borrow::{Borrow, BorrowMut}; use zenoh_buffers::{ZBuf, ZSlice}; -use crate::SharedMemoryBuf; - use super::{ traits::{SHMBuf, SHMBufMut}, zsliceshm::{zsliceshm, ZSliceShm}, }; +use crate::SharedMemoryBuf; /// A mutable SHM slice #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/header/segment.rs b/commons/zenoh-shm/src/header/segment.rs index e36e54a233..ab2353c35d 100644 --- a/commons/zenoh-shm/src/header/segment.rs +++ b/commons/zenoh-shm/src/header/segment.rs @@ -14,12 +14,11 @@ use 
zenoh_result::ZResult; -use crate::posix_shm::array::ArrayInSHM; - use super::{ chunk_header::ChunkHeaderType, descriptor::{HeaderIndex, HeaderSegmentID}, }; +use crate::posix_shm::array::ArrayInSHM; const HEADER_SEGMENT_PREFIX: &str = "header"; diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index abcdd558fb..316477d26e 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -11,8 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; -use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; use std::{ any::Any, sync::{ @@ -20,6 +18,9 @@ use std::{ Arc, }, }; + +use api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; +use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; use watchdog::{confirmator::ConfirmedDescriptor, descriptor::Descriptor}; use zenoh_buffers::ZSliceBuffer; diff --git a/commons/zenoh-shm/src/watchdog/segment.rs b/commons/zenoh-shm/src/watchdog/segment.rs index b4a273c01c..5943a10153 100644 --- a/commons/zenoh-shm/src/watchdog/segment.rs +++ b/commons/zenoh-shm/src/watchdog/segment.rs @@ -16,9 +16,8 @@ use std::sync::atomic::AtomicU64; use zenoh_result::ZResult; -use crate::posix_shm::array::ArrayInSHM; - use super::descriptor::SegmentID; +use crate::posix_shm::array::ArrayInSHM; const WATCHDOG_SEGMENT_PREFIX: &str = "watchdog"; diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index 098aa05411..99ba6d4ca2 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use event_listener::{Event, EventListener}; use std::{pin::Pin, sync::MutexGuard}; + +use event_listener::{Event, EventListener}; use tokio::sync::MutexGuard as AysncMutexGuard; pub type ConditionWaiter = Pin>; diff --git a/commons/zenoh-sync/src/fifo_queue.rs 
b/commons/zenoh-sync/src/fifo_queue.rs index e0ce57cb36..44bc2a5b17 100644 --- a/commons/zenoh-sync/src/fifo_queue.rs +++ b/commons/zenoh-sync/src/fifo_queue.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::Condition; use tokio::sync::Mutex; use zenoh_collections::RingBuffer; use zenoh_core::zasynclock; +use crate::Condition; + pub struct FifoQueue { not_empty: Condition, not_full: Condition, diff --git a/commons/zenoh-sync/src/lib.rs b/commons/zenoh-sync/src/lib.rs index 419246dc9d..20e95d2bb8 100644 --- a/commons/zenoh-sync/src/lib.rs +++ b/commons/zenoh-sync/src/lib.rs @@ -17,10 +17,13 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::FutureExt; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; pub mod fifo_queue; pub use fifo_queue::*; diff --git a/commons/zenoh-sync/src/lifo_queue.rs b/commons/zenoh-sync/src/lifo_queue.rs index f29614d4b2..9fe541da36 100644 --- a/commons/zenoh-sync/src/lifo_queue.rs +++ b/commons/zenoh-sync/src/lifo_queue.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Condvar, Mutex}; + use zenoh_collections::StackBuffer; use zenoh_core::zlock; diff --git a/commons/zenoh-sync/src/mvar.rs b/commons/zenoh-sync/src/mvar.rs index 1b4a90e1e2..f818b44071 100644 --- a/commons/zenoh-sync/src/mvar.rs +++ b/commons/zenoh-sync/src/mvar.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::Condition; use std::sync::atomic::{AtomicUsize, Ordering}; + use tokio::sync::Mutex; use zenoh_core::zasynclock; +use crate::Condition; + pub struct Mvar { inner: Mutex>, cond_put: Condition, @@ -96,9 +98,9 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn mvar() -> ZResult<()> { + use std::{sync::Arc, time::Duration}; + use super::Mvar; - use std::sync::Arc; - use 
std::time::Duration; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/commons/zenoh-sync/src/object_pool.rs b/commons/zenoh-sync/src/object_pool.rs index 3386b2058b..ee6eed881b 100644 --- a/commons/zenoh-sync/src/object_pool.rs +++ b/commons/zenoh-sync/src/object_pool.rs @@ -11,15 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::LifoQueue; use std::{ any::Any, fmt, ops::{Deref, DerefMut, Drop}, sync::{Arc, Weak}, }; + use zenoh_buffers::ZSliceBuffer; +use super::LifoQueue; + /// Provides a pool of pre-allocated objects that are automaticlaly reinserted into /// the pool when dropped. pub struct RecyclingObjectPool diff --git a/commons/zenoh-sync/src/signal.rs b/commons/zenoh-sync/src/signal.rs index 74dd3e5199..053f5a13aa 100644 --- a/commons/zenoh-sync/src/signal.rs +++ b/commons/zenoh-sync/src/signal.rs @@ -11,8 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicBool, Ordering::*}; -use std::sync::Arc; +use std::sync::{ + atomic::{AtomicBool, Ordering::*}, + Arc, +}; + use tokio::sync::Semaphore; #[derive(Debug, Clone)] @@ -68,9 +71,10 @@ impl Default for Signal { #[cfg(test)] mod tests { - use super::*; use std::time::Duration; + use super::*; + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn signal_test() { let signal = Signal::new(); diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index d41eb50f34..7eab9d316f 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -18,12 +18,11 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use std::{future::Future, time::Duration}; + use futures::future::FutureExt; -use std::future::Future; -use std::time::Duration; use tokio::task::JoinHandle; -use tokio_util::sync::CancellationToken; -use tokio_util::task::TaskTracker; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh_core::{ResolveFuture, Wait}; use zenoh_runtime::ZRuntime; diff --git a/commons/zenoh-util/src/std_only/ffi/win.rs b/commons/zenoh-util/src/std_only/ffi/win.rs index 3a15871c20..7f0bbd986a 100644 --- a/commons/zenoh-util/src/std_only/ffi/win.rs +++ b/commons/zenoh-util/src/std_only/ffi/win.rs @@ -11,9 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::io; -use std::mem; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::{ + io, mem, + net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, +}; + use winapi::shared::{ws2def, ws2ipdef}; #[allow(clippy::many_single_char_names)] diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/std_only/lib_loader.rs index 9c682e4343..d6b254eb35 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/std_only/lib_loader.rs @@ -11,11 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + env::consts::{DLL_PREFIX, DLL_SUFFIX}, + ffi::OsString, + ops::Deref, + path::PathBuf, +}; + use libloading::Library; -use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; -use std::ffi::OsString; -use std::ops::Deref; -use std::path::PathBuf; use tracing::{debug, warn}; use zenoh_core::zconfigurable; use zenoh_result::{bail, ZResult}; diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/std_only/net/mod.rs index 83ab08d678..65b665d31b 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/std_only/net/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::net::{IpAddr, Ipv6Addr}; + use 
tokio::net::{TcpSocket, UdpSocket}; use zenoh_core::zconfigurable; #[cfg(unix)] @@ -78,9 +79,10 @@ pub fn get_interface(name: &str) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -162,9 +164,10 @@ pub fn get_local_addresses(interface: Option<&str>) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; let mut result = vec![]; @@ -242,9 +245,10 @@ pub fn get_unicast_addresses_of_interface(name: &str) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; let mut addrs = vec![]; @@ -281,9 +285,10 @@ pub fn get_index_of_interface(addr: IpAddr) -> ZResult { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -324,9 +329,10 @@ pub fn get_interface_names_by_addr(addr: IpAddr) -> ZResult> { { let mut result = vec![]; unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; if addr.is_unspecified() { diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs index 9cfaf32655..51bff157ba 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -use humantime::{format_rfc3339, 
parse_rfc3339_weak}; use std::{ convert::{TryFrom, TryInto}, fmt::Display, @@ -20,6 +19,8 @@ use std::{ str::FromStr, time::{Duration, SystemTime}, }; + +use humantime::{format_rfc3339, parse_rfc3339_weak}; use zenoh_result::{bail, zerror, ZError}; const U_TO_SECS: f64 = 0.000001; diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/std_only/timer.rs index 6e7dde065a..d18b9192a4 100644 --- a/commons/zenoh-util/src/std_only/timer.rs +++ b/commons/zenoh-util/src/std_only/timer.rs @@ -11,16 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::*; -use async_std::sync::Mutex; -use async_std::task; +use std::{ + cmp::Ordering as ComparisonOrdering, + collections::BinaryHeap, + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, Weak, + }, + time::{Duration, Instant}, +}; + +use async_std::{prelude::*, sync::Mutex, task}; use async_trait::async_trait; use flume::{bounded, Receiver, RecvError, Sender}; -use std::cmp::Ordering as ComparisonOrdering; -use std::collections::BinaryHeap; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use std::sync::{Arc, Weak}; -use std::time::{Duration, Instant}; use zenoh_core::zconfigurable; zconfigurable! 
{ @@ -296,12 +299,18 @@ impl Default for Timer { mod tests { #[test] fn timer() { - use super::{Timed, TimedEvent, Timer}; + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, + }; + use async_std::task; use async_trait::async_trait; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::{Duration, Instant}; + + use super::{Timed, TimedEvent, Timer}; #[derive(Clone)] struct MyEvent { diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 6b6326ebcf..2b5ba011f6 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 43747697b6..6a616bfa2d 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 81181f1a81..ad761bddd2 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::{Duration, Instant}; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 7a7bd61580..d4c5b4f162 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::{Duration, Instant}; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git 
a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7c2c9f2c65..0a2e4e09c1 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 5eb4f9e96e..d047d63203 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // -use clap::Parser; use std::convert::TryInto; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 55f211f111..1e13cefb2f 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 2b03e32d06..46ccfc8193 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -13,9 +13,10 @@ // #![recursion_limit = "256"] +use std::collections::HashMap; + use clap::Parser; use futures::select; -use std::collections::HashMap; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 5f5c77633f..9914539ed5 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -12,8 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::*; +use zenoh::{config::Config, prelude::*}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 6913a7bf08..1006fdb434 100644 --- a/examples/examples/z_sub_thr.rs 
+++ b/examples/examples/z_sub_thr.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Instant; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 138726fd4f..5a41050e94 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -25,14 +25,14 @@ pub mod tls; mod unicast; use alloc::{borrow::ToOwned, boxed::Box, string::String, vec, vec::Vec}; -use async_trait::async_trait; use core::{cmp::PartialEq, fmt, hash::Hash}; + +use async_trait::async_trait; pub use listener::*; pub use multicast::*; use serde::Serialize; pub use unicast::*; -use zenoh_protocol::core::Locator; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; /*************************************/ diff --git a/io/zenoh-link-commons/src/listener.rs b/io/zenoh-link-commons/src/listener.rs index be61e9cf89..48930a7a65 100644 --- a/io/zenoh-link-commons/src/listener.rs +++ b/io/zenoh-link-commons/src/listener.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + net::{IpAddr, SocketAddr}, + sync::{Arc, RwLock}, +}; + use futures::Future; -use std::collections::HashMap; -use std::net::IpAddr; -use std::net::SocketAddr; -use std::sync::{Arc, RwLock}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use zenoh_core::{zread, zwrite}; diff --git a/io/zenoh-link-commons/src/multicast.rs b/io/zenoh-link-commons/src/multicast.rs index ccfe6842c1..ee07c4eb58 100644 --- a/io/zenoh-link-commons/src/multicast.rs +++ b/io/zenoh-link-commons/src/multicast.rs @@ -12,12 +12,13 @@ // ZettaScale Zenoh Team, // use alloc::{borrow::Cow, boxed::Box, sync::Arc, vec::Vec}; -use async_trait::async_trait; use core::{ fmt, hash::{Hash, Hasher}, ops::Deref, }; + +use async_trait::async_trait; use 
zenoh_buffers::{reader::HasReader, writer::HasWriter}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ diff --git a/io/zenoh-link-commons/src/tls.rs b/io/zenoh-link-commons/src/tls.rs index 562b02c81e..427880b812 100644 --- a/io/zenoh-link-commons/src/tls.rs +++ b/io/zenoh-link-commons/src/tls.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; + use rustls::{ client::{ danger::{ServerCertVerified, ServerCertVerifier}, diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index 2bd1808acf..add4c3a27b 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -12,13 +12,14 @@ // ZettaScale Zenoh Team, // use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; -use async_trait::async_trait; use core::{ fmt, hash::{Hash, Hasher}, ops::Deref, }; use std::net::SocketAddr; + +use async_trait::async_trait; use zenoh_protocol::{ core::{EndPoint, Locator}, transport::BatchSize, diff --git a/io/zenoh-link/src/lib.rs b/io/zenoh-link/src/lib.rs index 21f26ecf1b..7898cf087d 100644 --- a/io/zenoh-link/src/lib.rs +++ b/io/zenoh-link/src/lib.rs @@ -18,40 +18,41 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use std::collections::HashMap; -use zenoh_config::Config; -use zenoh_result::{bail, ZResult}; +use zenoh_config::Config; +pub use zenoh_link_commons::*; +#[cfg(feature = "transport_quic")] +pub use zenoh_link_quic as quic; +#[cfg(feature = "transport_quic")] +use zenoh_link_quic::{ + LinkManagerUnicastQuic, QuicConfigurator, QuicLocatorInspector, QUIC_LOCATOR_PREFIX, +}; +#[cfg(feature = "transport_serial")] +pub use zenoh_link_serial as serial; +#[cfg(feature = "transport_serial")] +use zenoh_link_serial::{LinkManagerUnicastSerial, SerialLocatorInspector, SERIAL_LOCATOR_PREFIX}; #[cfg(feature = "transport_tcp")] pub use zenoh_link_tcp as tcp; #[cfg(feature = "transport_tcp")] use zenoh_link_tcp::{LinkManagerUnicastTcp, TcpLocatorInspector, TCP_LOCATOR_PREFIX}; - -#[cfg(feature = "transport_udp")] -pub use zenoh_link_udp as udp; -#[cfg(feature = "transport_udp")] -use zenoh_link_udp::{ - LinkManagerMulticastUdp, LinkManagerUnicastUdp, UdpLocatorInspector, UDP_LOCATOR_PREFIX, -}; - #[cfg(feature = "transport_tls")] pub use zenoh_link_tls as tls; #[cfg(feature = "transport_tls")] use zenoh_link_tls::{ LinkManagerUnicastTls, TlsConfigurator, TlsLocatorInspector, TLS_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_quic")] -pub use zenoh_link_quic as quic; -#[cfg(feature = "transport_quic")] -use zenoh_link_quic::{ - LinkManagerUnicastQuic, QuicConfigurator, QuicLocatorInspector, QUIC_LOCATOR_PREFIX, +#[cfg(feature = "transport_udp")] +pub use zenoh_link_udp as udp; +#[cfg(feature = "transport_udp")] +use zenoh_link_udp::{ + LinkManagerMulticastUdp, LinkManagerUnicastUdp, UdpLocatorInspector, UDP_LOCATOR_PREFIX, +}; +#[cfg(feature = "transport_unixpipe")] +pub use zenoh_link_unixpipe as unixpipe; +#[cfg(feature = "transport_unixpipe")] +use zenoh_link_unixpipe::{ + LinkManagerUnicastPipe, UnixPipeConfigurator, UnixPipeLocatorInspector, UNIXPIPE_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_ws")] -pub use 
zenoh_link_ws as ws; -#[cfg(feature = "transport_ws")] -use zenoh_link_ws::{LinkManagerUnicastWs, WsLocatorInspector, WS_LOCATOR_PREFIX}; - #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] pub use zenoh_link_unixsock_stream as unixsock_stream; #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] @@ -59,26 +60,16 @@ use zenoh_link_unixsock_stream::{ LinkManagerUnicastUnixSocketStream, UnixSockStreamLocatorInspector, UNIXSOCKSTREAM_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_serial")] -pub use zenoh_link_serial as serial; -#[cfg(feature = "transport_serial")] -use zenoh_link_serial::{LinkManagerUnicastSerial, SerialLocatorInspector, SERIAL_LOCATOR_PREFIX}; - -#[cfg(feature = "transport_unixpipe")] -pub use zenoh_link_unixpipe as unixpipe; -#[cfg(feature = "transport_unixpipe")] -use zenoh_link_unixpipe::{ - LinkManagerUnicastPipe, UnixPipeConfigurator, UnixPipeLocatorInspector, UNIXPIPE_LOCATOR_PREFIX, -}; - #[cfg(all(feature = "transport_vsock", target_os = "linux"))] pub use zenoh_link_vsock as vsock; #[cfg(all(feature = "transport_vsock", target_os = "linux"))] use zenoh_link_vsock::{LinkManagerUnicastVsock, VsockLocatorInspector, VSOCK_LOCATOR_PREFIX}; - -pub use zenoh_link_commons::*; +#[cfg(feature = "transport_ws")] +pub use zenoh_link_ws as ws; +#[cfg(feature = "transport_ws")] +use zenoh_link_ws::{LinkManagerUnicastWs, WsLocatorInspector, WS_LOCATOR_PREFIX}; pub use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_result::{bail, ZResult}; pub const PROTOCOLS: &[&str] = &[ #[cfg(feature = "transport_quic")] diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index a60f84c559..a7303a9622 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -18,7 +18,6 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use async_trait::async_trait; - use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::{core::Locator, transport::BatchSize}; diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 05d33dff49..a3b2687b6f 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -12,17 +12,14 @@ // ZettaScale Zenoh Team, // -use crate::{ - config::*, - utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, - ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, +use std::{ + fmt, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, }; + use async_trait::async_trait; -use std::fmt; -use std::net::IpAddr; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::Mutex as AsyncMutex; use tokio_util::sync::CancellationToken; use zenoh_core::zasynclock; @@ -30,10 +27,18 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, ZResult}; +use crate::{ + config::*, + utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, + ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, +}; + pub struct LinkUnicastQuic { connection: quinn::Connection, src_addr: SocketAddr, diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs index e7537bd658..1eb8f94380 100644 --- a/io/zenoh-links/zenoh-link-quic/src/utils.rs +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -11,30 +11,32 @@ // Contributors: // ZettaScale 
Zenoh Team, // -use crate::config::*; -use crate::verify::WebPkiVerifierAnyServerName; -use rustls::OwnedTrustAnchor; -use rustls::{ - server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, PrivateKey, - RootCertStore, ServerConfig, -}; -use rustls_pki_types::{CertificateDer, TrustAnchor}; -use secrecy::ExposeSecret; -use zenoh_link_commons::ConfigurationInspector; // use rustls_pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}; -use std::fs::File; -use std::io; -use std::net::SocketAddr; use std::{ + fs::File, + io, io::{BufReader, Cursor}, + net::SocketAddr, sync::Arc, }; + +use rustls::{ + server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, + OwnedTrustAnchor, PrivateKey, RootCertStore, ServerConfig, +}; +use rustls_pki_types::{CertificateDer, TrustAnchor}; +use secrecy::ExposeSecret; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; -use zenoh_protocol::core::endpoint::{Address, Config}; -use zenoh_protocol::core::Parameters; +use zenoh_link_commons::ConfigurationInspector; +use zenoh_protocol::core::{ + endpoint::{Address, Config}, + Parameters, +}; use zenoh_result::{bail, zerror, ZError, ZResult}; +use crate::{config::*, verify::WebPkiVerifierAnyServerName}; + #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; @@ -498,8 +500,7 @@ pub async fn get_quic_addr(address: &Address<'_>) -> ZResult { } pub fn base64_decode(data: &str) -> ZResult> { - use base64::engine::general_purpose; - use base64::Engine; + use base64::{engine::general_purpose, Engine}; Ok(general_purpose::STANDARD .decode(data) .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) 
diff --git a/io/zenoh-links/zenoh-link-quic/src/verify.rs b/io/zenoh-links/zenoh-link-quic/src/verify.rs index baa7864246..544d7c8a65 100644 --- a/io/zenoh-links/zenoh-link-quic/src/verify.rs +++ b/io/zenoh-links/zenoh-link-quic/src/verify.rs @@ -1,6 +1,6 @@ -use rustls::client::verify_server_cert_signed_by_trust_anchor; -use rustls::server::ParsedCertificate; use std::time::SystemTime; + +use rustls::{client::verify_server_cert_signed_by_trust_anchor, server::ParsedCertificate}; use tokio_rustls::rustls::{ client::{ServerCertVerified, ServerCertVerifier}, Certificate, RootCertStore, ServerName, diff --git a/io/zenoh-links/zenoh-link-serial/src/lib.rs b/io/zenoh-links/zenoh-link-serial/src/lib.rs index f7b0b7afeb..3d2ddcd0e6 100644 --- a/io/zenoh-links/zenoh-link-serial/src/lib.rs +++ b/io/zenoh-links/zenoh-link-serial/src/lib.rs @@ -19,13 +19,16 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) mod unicast; -use async_trait::async_trait; use std::str::FromStr; + +use async_trait::async_trait; pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; // Maximum MTU (Serial PDU) in bytes. 
diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 239ff6bb9d..ca4efacdc6 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -12,35 +12,40 @@ // ZettaScale Zenoh Team, // +use std::{ + cell::UnsafeCell, + collections::HashMap, + fmt, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, +use tokio::{ + sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}, + task::JoinHandle, }; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; -use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; +use z_serial::ZSerial; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; -use z_serial::ZSerial; - -use crate::get_exclusive; - use super::{ get_baud_rate, get_unix_path_as_string, SERIAL_ACCEPT_THROTTLE_TIME, SERIAL_DEFAULT_MTU, SERIAL_LOCATOR_PREFIX, }; +use crate::get_exclusive; struct LinkUnicastSerial { // The underlying serial port as returned by ZSerial (tokio-serial) diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 0b075d9bf8..ebc2bba70b 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -17,12 +17,15 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use async_trait::async_trait; use std::net::SocketAddr; + +use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; mod unicast; diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 1a8a2302d1..79812c526e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -11,28 +11,28 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{cell::UnsafeCell, convert::TryInto, fmt, net::SocketAddr, sync::Arc, time::Duration}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::convert::TryInto; -use std::fmt; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpSocket, TcpStream}, +}; use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use super::{ get_tcp_addrs, TCP_ACCEPT_THROTTLE_TIME, TCP_DEFAULT_MTU, TCP_LINGER_TIMEOUT, TCP_LOCATOR_PREFIX, }; -use tokio::net::{TcpListener, TcpSocket, TcpStream}; pub struct LinkUnicastTcp { // The underlying socket as returned from the tokio library diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 
5cf686cdc5..1ced1a26b1 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -11,21 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, - TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, -}; +use std::{cell::UnsafeCell, convert::TryInto, fmt, net::SocketAddr, sync::Arc, time::Duration}; use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::convert::TryInto; -use std::fmt; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::Mutex as AsyncMutex; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpStream}, + sync::Mutex as AsyncMutex, +}; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; use zenoh_core::zasynclock; @@ -33,10 +26,17 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; +use crate::{ + utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, + TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, +}; + pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library // NOTE: TlsStream requires &mut for read and write operations. 
This means diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index d51a17c694..b646c6e80d 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -11,7 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::config::*; +use std::{ + convert::TryFrom, + fs::File, + io, + io::{BufReader, Cursor}, + net::SocketAddr, + sync::Arc, +}; + use rustls::{ pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, server::WebPkiClientVerifier, @@ -20,20 +28,17 @@ use rustls::{ }; use rustls_pki_types::ServerName; use secrecy::ExposeSecret; -use std::fs::File; -use std::io; -use std::{convert::TryFrom, net::SocketAddr}; -use std::{ - io::{BufReader, Cursor}, - sync::Arc, -}; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; -use zenoh_protocol::core::endpoint::{Address, Config}; -use zenoh_protocol::core::Parameters; +use zenoh_protocol::core::{ + endpoint::{Address, Config}, + Parameters, +}; use zenoh_result::{bail, zerror, ZError, ZResult}; +use crate::config::*; + #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; @@ -450,8 +455,7 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { } pub fn base64_decode(data: &str) -> ZResult> { - use base64::engine::general_purpose; - use base64::Engine; + use base64::{engine::general_purpose, Engine}; Ok(general_purpose::STANDARD .decode(data) .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) 
diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 86db845d8f..c89708fe5d 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -20,14 +20,17 @@ mod multicast; mod unicast; +use std::net::SocketAddr; + use async_trait::async_trait; pub use multicast::*; -use std::net::SocketAddr; pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; // NOTE: In case of using UDP in high-throughput scenarios, it is recommended to set the diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index f4a23ced93..280f5eb203 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -11,19 +11,26 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{config::*, UDP_DEFAULT_MTU}; -use crate::{get_udp_addrs, socket_addr_to_udp_locator}; +use std::{ + borrow::Cow, + fmt, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, +}; + use async_trait::async_trait; use socket2::{Domain, Protocol, Socket, Type}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::{borrow::Cow, fmt}; use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; -use zenoh_protocol::core::{Config, EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{Config, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; +use super::{config::*, UDP_DEFAULT_MTU}; +use crate::{get_udp_addrs, socket_addr_to_udp_locator}; + pub struct LinkMulticastUdp { // The 
unicast socket address of this link unicast_addr: SocketAddr, diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 9526ca74dd..79f980ca96 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -11,29 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, UDP_DEFAULT_MTU, - UDP_MAX_MTU, +use std::{ + collections::HashMap, + fmt, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::{Arc, Mutex, Weak}, + time::Duration, }; + use async_trait::async_trait; -use std::collections::HashMap; -use std::fmt; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::{Arc, Mutex, Weak}; -use std::time::Duration; -use tokio::net::UdpSocket; -use tokio::sync::Mutex as AsyncMutex; +use tokio::{net::UdpSocket, sync::Mutex as AsyncMutex}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; +use super::{ + get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, UDP_DEFAULT_MTU, + UDP_MAX_MTU, +}; + type LinkHashMap = Arc>>>; type LinkInput = (Vec, usize); type LinkLeftOver = (Vec, usize, usize); diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index ea90630523..1b30ceb553 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -11,41 +11,43 @@ // 
Contributors: // ZettaScale Zenoh Team, // -use crate::config; +use std::{ + cell::UnsafeCell, + collections::HashMap, + fmt, + fs::{File, OpenOptions}, + io::{ErrorKind, Read, Write}, + os::unix::fs::OpenOptionsExt, + sync::Arc, +}; + #[cfg(not(target_os = "macos"))] use advisory_lock::{AdvisoryFileLock, FileLockMode}; use async_trait::async_trait; use filepath::FilePath; -use nix::libc; -use nix::unistd::unlink; +use nix::{libc, unistd::unlink}; use rand::Rng; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::fs::{File, OpenOptions}; -use std::io::ErrorKind; -use std::io::{Read, Write}; -use std::os::unix::fs::OpenOptionsExt; -use std::sync::Arc; -use tokio::fs::remove_file; -use tokio::io::unix::AsyncFd; -use tokio::io::Interest; -use tokio::task::JoinHandle; +use tokio::{ + fs::remove_file, + io::{unix::AsyncFd, Interest}, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; -use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; -use zenoh_runtime::ZRuntime; - use unix_named_pipe::{create, open_write}; - +use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_link_commons::{ ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, ZResult}; +use zenoh_runtime::ZRuntime; use super::FILE_ACCESS_MASK; +use crate::config; const LINUX_PIPE_MAX_MTU: BatchSize = BatchSize::MAX; const LINUX_PIPE_DEDICATE_TRIES: usize = 100; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs index ce067c1aa2..771782e62a 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs @@ -20,8 +20,10 @@ use async_trait::async_trait; use 
zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; #[cfg(target_family = "unix")] mod unicast; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 4ad1b68d88..cc7147c9e0 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -11,31 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME; +use std::{ + cell::UnsafeCell, collections::HashMap, fmt, fs::remove_file, os::unix::io::RawFd, + path::PathBuf, sync::Arc, time::Duration, +}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::fs::remove_file; -use std::os::unix::io::RawFd; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{UnixListener, UnixStream}; -use tokio::sync::RwLock as AsyncRwLock; -use tokio::task::JoinHandle; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{UnixListener, UnixStream}, + sync::RwLock as AsyncRwLock, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; use uuid::Uuid; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; -use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; +use super::{ + get_unix_path_as_string, 
UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME, UNIXSOCKSTREAM_DEFAULT_MTU, + UNIXSOCKSTREAM_LOCATOR_PREFIX, +}; pub struct LinkUnicastUnixSocketStream { // The underlying socket as returned from the tokio library diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 6616790a28..605f114173 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -12,17 +12,20 @@ // ZettaScale Zenoh Team, // +use std::{cell::UnsafeCell, collections::HashMap, fmt, sync::Arc, time::Duration}; + use async_trait::async_trait; use libc::VMADDR_PORT_ANY; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::sync::RwLock as AsyncRwLock; -use tokio::task::JoinHandle; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + sync::RwLock as AsyncRwLock, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; +use tokio_vsock::{ + VsockAddr, VsockListener, VsockStream, VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_CID_HYPERVISOR, + VMADDR_CID_LOCAL, +}; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, @@ -34,10 +37,6 @@ use zenoh_protocol::{ use zenoh_result::{bail, zerror, ZResult}; use super::{VSOCK_ACCEPT_THROTTLE_TIME, VSOCK_DEFAULT_MTU, VSOCK_LOCATOR_PREFIX}; -use tokio_vsock::{ - VsockAddr, VsockListener, VsockStream, VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_CID_HYPERVISOR, - VMADDR_CID_LOCAL, -}; pub const VSOCK_VMADDR_CID_ANY: &str = "VMADDR_CID_ANY"; pub const VSOCK_VMADDR_CID_HYPERVISOR: &str = "VMADDR_CID_HYPERVISOR"; diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index d165b480a9..6a97ed99b6 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -17,13 +17,16 @@ 
//! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use async_trait::async_trait; use std::net::SocketAddr; + +use async_trait::async_trait; use url::Url; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, ZResult}; mod unicast; pub use unicast::*; diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 635f3b8808..b671bf67f2 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -12,29 +12,34 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + fmt, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; + use async_trait::async_trait; -use futures_util::stream::SplitSink; -use futures_util::stream::SplitStream; -use futures_util::SinkExt; -use futures_util::StreamExt; -use std::collections::HashMap; -use std::fmt; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; -use tokio::task::JoinHandle; -use tokio_tungstenite::accept_async; -use tokio_tungstenite::tungstenite::Message; -use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; +use futures_util::{ + stream::{SplitSink, SplitStream}, + SinkExt, StreamExt, +}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}, + task::JoinHandle, +}; +use tokio_tungstenite::{accept_async, tungstenite::Message, MaybeTlsStream, WebSocketStream}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ 
LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, ZResult}; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 8048d9ff49..b91acdc7ff 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::num::NonZeroUsize; + use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, HasReader}, @@ -497,7 +498,6 @@ impl Decode<(TransportMessage, BatchSize)> for &mut RBatch { mod tests { use std::vec; - use super::*; use rand::Rng; use zenoh_buffers::ZBuf; use zenoh_core::zcondfeat; @@ -511,6 +511,8 @@ mod tests { zenoh::{PushBody, Put}, }; + use super::*; + #[test] fn rw_batch() { let mut rng = rand::thread_rng(); diff --git a/io/zenoh-transport/src/common/defragmentation.rs b/io/zenoh-transport/src/common/defragmentation.rs index 8fab075fe4..476fad632c 100644 --- a/io/zenoh-transport/src/common/defragmentation.rs +++ b/io/zenoh-transport/src/common/defragmentation.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::seq_num::SeqNum; use zenoh_buffers::{buffer::Buffer, reader::HasReader, ZBuf, ZSlice}; use zenoh_codec::{RCodec, Zenoh080Reliability}; use zenoh_protocol::{ @@ -21,6 +20,8 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, ZResult}; +use super::seq_num::SeqNum; + #[derive(Debug)] pub(crate) struct DefragBuffer { reliability: Reliability, diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index fe4e567617..349f9ed560 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ 
-1,30 +1,13 @@ -use crate::common::batch::BatchConfig; - -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use super::{ - batch::{Encode, WBatch}, - priority::{TransportChannelTx, TransportPriorityTx}, +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, MutexGuard, + }, + time::{Duration, Instant}, }; + use flume::{bounded, Receiver, Sender}; use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; -use std::sync::{Arc, Mutex, MutexGuard}; -use std::time::Duration; -use std::{ - sync::atomic::{AtomicBool, Ordering}, - time::Instant, -}; use zenoh_buffers::{ reader::{HasReader, Reader}, writer::HasWriter, @@ -33,10 +16,9 @@ use zenoh_buffers::{ use zenoh_codec::{transport::batch::BatchError, WCodec, Zenoh080}; use zenoh_config::QueueSizeConf; use zenoh_core::zlock; -use zenoh_protocol::core::Reliability; -use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::{ - core::Priority, + core::{Priority, Reliability}, + network::NetworkMessage, transport::{ fragment::FragmentHeader, frame::{self, FrameHeader}, @@ -44,6 +26,25 @@ use zenoh_protocol::{ }, }; +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::{ + batch::{Encode, WBatch}, + priority::{TransportChannelTx, TransportPriorityTx}, +}; +use crate::common::batch::BatchConfig; + // It's faster to work directly with nanoseconds. // Backoff will never last more the u32::MAX nanoseconds. type NanoSeconds = u32; @@ -719,7 +720,6 @@ impl TransmissionPipelineConsumer { #[cfg(test)] mod tests { - use super::*; use std::{ convert::TryFrom, sync::{ @@ -728,8 +728,8 @@ mod tests { }, time::{Duration, Instant}, }; - use tokio::task; - use tokio::time::timeout; + + use tokio::{task, time::timeout}; use zenoh_buffers::{ reader::{DidntRead, HasReader}, ZBuf, @@ -743,6 +743,8 @@ mod tests { }; use zenoh_result::ZResult; + use super::*; + const SLEEP: Duration = Duration::from_millis(100); const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/src/common/priority.rs b/io/zenoh-transport/src/common/priority.rs index 8644cdacb7..fb5c520e3d 100644 --- a/io/zenoh-transport/src/common/priority.rs +++ b/io/zenoh-transport/src/common/priority.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::defragmentation::DefragBuffer; -use super::seq_num::{SeqNum, SeqNumGenerator}; use std::sync::{Arc, Mutex}; + use zenoh_core::zlock; use zenoh_protocol::{ core::{Bits, Reliability}, @@ -21,6 +20,11 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +use super::{ + defragmentation::DefragBuffer, + seq_num::{SeqNum, SeqNumGenerator}, +}; + #[derive(Debug)] pub(crate) struct TransportChannelTx { pub(crate) sn: SeqNumGenerator, diff --git a/io/zenoh-transport/src/common/stats.rs b/io/zenoh-transport/src/common/stats.rs index aaf39641c0..da6e57d518 100644 --- a/io/zenoh-transport/src/common/stats.rs +++ b/io/zenoh-transport/src/common/stats.rs @@ -167,8 +167,9 @@ macro_rules! 
stats_struct { } } -use serde::{Deserialize, Serialize}; use std::sync::atomic::{AtomicUsize, Ordering}; + +use serde::{Deserialize, Serialize}; stats_struct! { #[derive(Clone, Debug, Deserialize, Serialize)] pub struct DiscriminatedStats { diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index 5e00bed2e7..f4c135c9d6 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -28,16 +28,19 @@ pub use common::stats; #[cfg(feature = "shared-memory")] mod shm; -use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; +use std::{any::Any, sync::Arc}; + pub use manager::*; use serde::Serialize; -use std::any::Any; -use std::sync::Arc; use zenoh_link::Link; -use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::{ + core::{WhatAmI, ZenohId}, + network::NetworkMessage, +}; use zenoh_result::ZResult; +use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; + /*************************************/ /* TRANSPORT */ /*************************************/ diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index ddf1fe23c1..3f57b3ceae 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -11,18 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::unicast::manager::{ - TransportManagerBuilderUnicast, TransportManagerConfigUnicast, TransportManagerStateUnicast, -}; -use super::TransportEventHandler; -use crate::multicast::manager::{ - TransportManagerBuilderMulticast, TransportManagerConfigMulticast, - TransportManagerStateMulticast, -}; +use std::{collections::HashMap, sync::Arc, time::Duration}; + use rand::{RngCore, SeedableRng}; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::Mutex as AsyncMutex; use zenoh_config::{Config, LinkRxConf, QueueConf, QueueSizeConf}; use zenoh_crypto::{BlockCipher, PseudoRng}; @@ -39,6 
+30,17 @@ use zenoh_shm::api::client_storage::GLOBAL_CLIENT_STORAGE; use zenoh_shm::reader::SharedMemoryReader; use zenoh_task::TaskController; +use super::{ + unicast::manager::{ + TransportManagerBuilderUnicast, TransportManagerConfigUnicast, TransportManagerStateUnicast, + }, + TransportEventHandler, +}; +use crate::multicast::manager::{ + TransportManagerBuilderMulticast, TransportManagerConfigMulticast, + TransportManagerStateMulticast, +}; + /// # Examples /// ``` /// use std::sync::Arc; diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index a0b7576f03..0c24626697 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -11,6 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::sync::Arc; + +use rand::Rng; +use zenoh_core::zasynclock; +use zenoh_link::LinkMulticast; +use zenoh_protocol::{ + core::{Field, Priority}, + transport::PrioritySn, +}; +use zenoh_result::{bail, ZResult}; + use crate::{ common::{batch::BatchConfig, seq_num}, multicast::{ @@ -20,15 +31,6 @@ use crate::{ }, TransportManager, }; -use rand::Rng; -use std::sync::Arc; -use zenoh_core::zasynclock; -use zenoh_link::LinkMulticast; -use zenoh_protocol::{ - core::{Field, Priority}, - transport::PrioritySn, -}; -use zenoh_result::{bail, ZResult}; pub(crate) async fn open_link( manager: &TransportManager, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 883f978684..a1c9c2bae8 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -11,25 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::{ - common::{ - batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, - pipeline::{ - TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, - 
TransmissionPipelineProducer, - }, - priority::TransportPriorityTx, - }, - multicast::transport::TransportMulticastInner, -}; use std::{ convert::TryInto, fmt, sync::Arc, time::{Duration, Instant}, }; + use tokio::task::JoinHandle; use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::{zcondfeat, zlock}; @@ -41,6 +29,20 @@ use zenoh_protocol::{ use zenoh_result::{zerror, ZResult}; use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::{ + common::{ + batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + multicast::transport::TransportMulticastInner, +}; + /****************************/ /* TRANSPORT MULTICAST LINK */ /****************************/ diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index ebc51a2ec6..3c04cf6425 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -11,11 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::multicast::{transport::TransportMulticastInner, TransportMulticast}; -use crate::TransportManager; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashMap, sync::Arc, time::Duration}; + use tokio::sync::Mutex; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; @@ -30,6 +27,11 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use crate::{ + multicast::{transport::TransportMulticastInner, TransportMulticast}, + TransportManager, +}; + pub struct TransportManagerConfigMulticast { pub lease: Duration, pub keep_alive: usize, diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index e205125b39..78d76bb6c8 
100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -17,18 +17,15 @@ pub(crate) mod rx; pub(crate) mod transport; pub(crate) mod tx; -use super::common; -use crate::{ - multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +use std::{ + fmt::{self, Write}, + sync::{Arc, Weak}, }; + pub use manager::{ TransportManagerBuilderMulticast, TransportManagerConfigMulticast, TransportManagerParamsMulticast, }; -use std::{ - fmt::{self, Write}, - sync::{Arc, Weak}, -}; use transport::TransportMulticastInner; use zenoh_core::{zcondfeat, zread}; use zenoh_link::Link; @@ -39,6 +36,11 @@ use zenoh_protocol::{ }; use zenoh_result::{zerror, ZResult}; +use super::common; +use crate::{ + multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +}; + /*************************************/ /* TRANSPORT MULTICAST */ /*************************************/ diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 1576d65cd6..ee8e024bb6 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::{TransportMulticastInner, TransportMulticastPeer}; -use crate::common::{ - batch::{Decode, RBatch}, - priority::TransportChannelRx, -}; use std::sync::MutexGuard; + use zenoh_core::{zlock, zread}; use zenoh_protocol::{ core::{Locator, Priority, Reliability}, @@ -28,6 +24,12 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::transport::{TransportMulticastInner, TransportMulticastPeer}; +use crate::common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, +}; + /*************************************/ /* TRANSPORT RX */ /*************************************/ diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index 
babf68ce61..a60ed180ee 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -11,18 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; -use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; -#[cfg(feature = "shared-memory")] -use crate::shm::MulticastTransportShmConfig; -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::{ - multicast::{ - link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, - }, - TransportManager, TransportPeer, TransportPeerEventHandler, -}; use std::{ collections::HashMap, sync::{ @@ -31,17 +19,31 @@ use std::{ }, time::Duration, }; + use tokio_util::sync::CancellationToken; use zenoh_core::{zcondfeat, zread, zwrite}; use zenoh_link::{Link, Locator}; -use zenoh_protocol::core::Resolution; -use zenoh_protocol::transport::{batch_size, Close, TransportMessage}; use zenoh_protocol::{ - core::{Bits, Field, Priority, WhatAmI, ZenohId}, - transport::{close, Join}, + core::{Bits, Field, Priority, Resolution, WhatAmI, ZenohId}, + transport::{batch_size, close, Close, Join, TransportMessage}, }; use zenoh_result::{bail, ZResult}; use zenoh_task::TaskController; + +use super::{ + common::priority::{TransportPriorityRx, TransportPriorityTx}, + link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}, +}; +#[cfg(feature = "shared-memory")] +use crate::shm::MulticastTransportShmConfig; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::{ + multicast::{ + link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, + }, + TransportManager, TransportPeer, TransportPeerEventHandler, +}; // use zenoh_util::{Timed, TimedEvent, TimedHandle, Timer}; /*************************************/ diff --git a/io/zenoh-transport/src/multicast/tx.rs 
b/io/zenoh-transport/src/multicast/tx.rs index ee7715d38b..775131703a 100644 --- a/io/zenoh-transport/src/multicast/tx.rs +++ b/io/zenoh-transport/src/multicast/tx.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportMulticastInner; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +use super::transport::TransportMulticastInner; #[cfg(feature = "shared-memory")] use crate::shm::map_zmsg_to_partner; diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 6dd65aab16..7a50a68742 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::collections::HashSet; + use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf, ZSlice, ZSliceKind}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_core::zerror; diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 48638834e0..d074ea9642 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -11,26 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use super::ext::shm::AuthSegment; -#[cfg(feature = "shared-memory")] -use crate::shm::TransportShmConfig; +use std::time::Duration; -use crate::{ - common::batch::BatchConfig, - unicast::{ - establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, - link::{ - LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, - TransportLinkUnicastDirection, - }, - TransportConfigUnicast, - }, - TransportManager, -}; use async_trait::async_trait; use rand::Rng; -use std::time::Duration; use tokio::sync::Mutex; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; @@ -47,6 +31,23 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; 
+#[cfg(feature = "shared-memory")] +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; +use crate::{ + common::batch::BatchConfig, + unicast::{ + establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, + link::{ + LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, + TransportLinkUnicastDirection, + }, + TransportConfigUnicast, + }, + TransportManager, +}; + pub(super) type AcceptError = (zenoh_result::Error, Option); struct StateTransport { diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 6f0295601c..fccce5e672 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::ext; use std::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -24,6 +24,8 @@ use zenoh_protocol::{ transport::BatchSize, }; +use crate::unicast::establishment::ext; + #[derive(Debug, PartialEq)] pub(crate) struct Cookie { pub(crate) zid: ZenohId, @@ -193,10 +195,11 @@ impl Cookie { mod tests { #[test] fn codec_cookie() { - use super::*; use rand::{Rng, SeedableRng}; use zenoh_buffers::ZBuf; + use super::*; + const NUM_ITER: usize = 1_000; macro_rules! 
run_single { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index beab85d18a..8d57434bc3 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -16,21 +16,19 @@ pub(crate) mod pubkey; #[cfg(feature = "auth_usrpwd")] pub(crate) mod usrpwd; -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use std::{convert::TryInto, marker::PhantomData}; + use async_trait::async_trait; #[cfg(feature = "auth_pubkey")] pub use pubkey::*; use rand::{CryptoRng, Rng}; -use std::convert::TryInto; -use std::marker::PhantomData; use tokio::sync::{Mutex, RwLock}; #[cfg(feature = "auth_usrpwd")] pub use usrpwd::*; -use zenoh_buffers::reader::SiphonableReader; -use zenoh_buffers::ZBuf; use zenoh_buffers::{ - reader::{DidntRead, HasReader, Reader}, + reader::{DidntRead, HasReader, Reader, SiphonableReader}, writer::{DidntWrite, HasWriter, Writer}, + ZBuf, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_config::Config; @@ -41,6 +39,8 @@ use zenoh_protocol::{ transport::{init, open}, }; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + pub(crate) mod id { #[cfg(feature = "auth_pubkey")] pub(crate) const PUBKEY: u8 = 0x1; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 9a7c3d8f32..69b4707bf0 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -11,7 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; +use std::{collections::HashSet, fmt, ops::Deref, path::Path}; + use async_trait::async_trait; use rand::Rng; use rsa::{ @@ -19,7 +20,6 @@ use rsa::{ traits::PublicKeyParts, BigUint, Pkcs1v15Encrypt, RsaPrivateKey, RsaPublicKey, 
}; -use std::{collections::HashSet, fmt, ops::Deref, path::Path}; use tokio::sync::{Mutex, RwLock}; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, @@ -31,10 +31,13 @@ use zenoh_core::{bail, zasynclock, zasyncread, zerror, Error as ZError, Result a use zenoh_crypto::PseudoRng; use zenoh_protocol::common::{ZExtUnit, ZExtZBuf}; +use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; + mod ext { - use super::{id::PUBKEY, ZExtUnit, ZExtZBuf}; use zenoh_protocol::{zextunit, zextzbuf}; + use super::{id::PUBKEY, ZExtUnit, ZExtZBuf}; + pub(super) type InitSyn = zextzbuf!(PUBKEY, false); pub(super) type InitAck = zextzbuf!(PUBKEY, false); pub(super) type OpenSyn = zextzbuf!(PUBKEY, false); diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 23560e307e..be24337fad 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; +use std::{collections::HashMap, fmt}; + use async_trait::async_trait; use rand::{CryptoRng, Rng}; -use std::{collections::HashMap, fmt}; use tokio::sync::RwLock; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, @@ -26,10 +26,13 @@ use zenoh_core::{bail, zasyncread, zerror, Error as ZError, Result as ZResult}; use zenoh_crypto::hmac; use zenoh_protocol::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; +use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; + mod ext { - use super::{id::USRPWD, ZExtUnit, ZExtZ64, ZExtZBuf}; use zenoh_protocol::{zextunit, zextz64, zextzbuf}; + use super::{id::USRPWD, ZExtUnit, ZExtZ64, ZExtZBuf}; + pub(super) type InitSyn = zextunit!(USRPWD, false); pub(super) type InitAck = zextz64!(USRPWD, false); pub(super) type OpenSyn = zextzbuf!(USRPWD, false); @@ 
-451,10 +454,12 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn authenticator_usrpwd_config() { async fn inner() { - use super::AuthUsrPwd; use std::{fs::File, io::Write}; + use zenoh_config::UsrPwdConf; + use super::AuthUsrPwd; + /* [CONFIG] */ let f1 = "zenoh-test-auth-usrpwd.txt"; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs index 2b57eb85db..1d4e995af6 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_trait::async_trait; use core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct CompressionFsm<'a> { _a: PhantomData<&'a ()>, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs index 9dda9175b1..ff1efc90b9 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_trait::async_trait; use core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use 
crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct LowLatencyFsm<'a> { _a: PhantomData<&'a ()>, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index f8e74779cf..8980766888 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -11,10 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ - ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, - AcceptFsm, OpenFsm, -}; use async_trait::async_trait; use rand::{CryptoRng, Rng}; use rsa::{BigUint, RsaPrivateKey, RsaPublicKey}; @@ -28,6 +24,11 @@ use zenoh_core::{zerror, Error as ZError, Result as ZResult}; use zenoh_crypto::PseudoRng; use zenoh_protocol::transport::{init, open}; +use crate::unicast::establishment::{ + ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, + AcceptFsm, OpenFsm, +}; + const KEY_SIZE: usize = 512; // Extension Fsm diff --git a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs index 4626ec5998..f749073805 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_trait::async_trait; use core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct QoSFsm<'a> { _a: PhantomData<&'a ()>, diff --git 
a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 1287095a51..bc96d2e34a 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use std::ops::Deref; + use async_trait::async_trait; use rand::{Rng, SeedableRng}; -use std::ops::Deref; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::transport::{init, open}; use zenoh_result::{zerror, Error as ZError, ZResult}; use zenoh_shm::{api::common::types::ProtocolID, posix_shm::array::ArrayInSHM}; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + /*************************************/ /* Segment */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index f79aa826d0..79627f4c49 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -16,7 +16,6 @@ pub(super) mod cookie; pub mod ext; pub(crate) mod open; -use crate::common::seq_num; use async_trait::async_trait; use cookie::*; use sha3::{ @@ -28,6 +27,8 @@ use zenoh_protocol::{ transport::TransportSn, }; +use crate::common::seq_num; + /*************************************/ /* TRAITS */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 40aa959d10..49c57d9e9a 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -11,6 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + +use async_trait::async_trait; +use 
zenoh_buffers::ZSlice; +#[cfg(feature = "transport_auth")] +use zenoh_core::zasynclock; +use zenoh_core::{zcondfeat, zerror}; +use zenoh_link::LinkUnicast; +use zenoh_protocol::{ + core::{Field, Resolution, WhatAmI, ZenohId}, + transport::{ + batch_size, close, BatchSize, Close, InitSyn, OpenSyn, TransportBody, TransportMessage, + TransportSn, + }, +}; +use zenoh_result::ZResult; + #[cfg(feature = "shared-memory")] use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] @@ -27,21 +44,6 @@ use crate::{ }, TransportManager, }; -use async_trait::async_trait; -use std::time::Duration; -use zenoh_buffers::ZSlice; -#[cfg(feature = "transport_auth")] -use zenoh_core::zasynclock; -use zenoh_core::{zcondfeat, zerror}; -use zenoh_link::LinkUnicast; -use zenoh_protocol::{ - core::{Field, Resolution, WhatAmI, ZenohId}, - transport::{ - batch_size, close, BatchSize, Close, InitSyn, OpenSyn, TransportBody, TransportMessage, - TransportSn, - }, -}; -use zenoh_result::ZResult; type OpenError = (zenoh_result::Error, Option); diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index 1c9c190aae..b76bc764ef 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -11,15 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; -use std::fmt; -use std::sync::Arc; +use std::{fmt, sync::Arc}; + use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::zcondfeat; use zenoh_link::{Link, LinkUnicast}; use zenoh_protocol::transport::{BatchSize, Close, OpenAck, TransportMessage}; use zenoh_result::{zerror, ZResult}; +use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; + #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(crate) enum TransportLinkUnicastDirection { Inbound, diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs 
b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 7958631a8b..6dcd2fde44 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -11,24 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::unicast::link::TransportLinkUnicast; -use crate::unicast::link::TransportLinkUnicastRx; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; + use tokio::sync::RwLock; use tokio_util::sync::CancellationToken; use zenoh_buffers::{writer::HasWriter, ZSlice}; use zenoh_codec::*; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link::LinkUnicast; -use zenoh_protocol::transport::TransportMessageLowLatency; -use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency}; +use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; use zenoh_runtime::ZRuntime; +use super::transport::TransportUnicastLowlatency; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx}; + pub(crate) async fn send_with_link( link: &LinkUnicast, msg: TransportMessageLowLatency, diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index de0b62354f..c82e172c7b 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; use zenoh_buffers::{ reader::{HasReader, Reader}, ZSlice, @@ -22,6 +21,8 @@ use zenoh_link::LinkUnicast; use zenoh_protocol::{network::NetworkMessage, transport::TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; +use super::transport::TransportUnicastLowlatency; + 
/*************************************/ /* TRANSPORT RX */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 726d21bb84..9f122e9c72 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -11,6 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + sync::{Arc, RwLock as SyncRwLock}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{WhatAmI, ZenohId}, + network::NetworkMessage, + transport::{close, Close, TransportBodyLowLatency, TransportMessageLowLatency, TransportSn}, +}; +use zenoh_result::{zerror, ZResult}; + #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -21,23 +38,6 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_trait::async_trait; -use std::sync::{Arc, RwLock as SyncRwLock}; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; -use tokio_util::sync::CancellationToken; -use tokio_util::task::TaskTracker; -use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; -use zenoh_link::Link; -use zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::transport::TransportBodyLowLatency; -use zenoh_protocol::transport::TransportMessageLowLatency; -use zenoh_protocol::transport::{Close, TransportSn}; -use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, - transport::close, -}; -use zenoh_result::{zerror, ZResult}; /*************************************/ /* LOW-LATENCY TRANSPORT */ diff --git a/io/zenoh-transport/src/unicast/lowlatency/tx.rs 
b/io/zenoh-transport/src/unicast/lowlatency/tx.rs index d573544340..90304a196d 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/tx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/tx.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; use zenoh_protocol::{ network::NetworkMessage, transport::{TransportBodyLowLatency, TransportMessageLowLatency}, @@ -20,6 +19,7 @@ use zenoh_protocol::{ use zenoh_result::bail; use zenoh_result::ZResult; +use super::transport::TransportUnicastLowlatency; #[cfg(feature = "shared-memory")] use crate::shm::map_zmsg_to_partner; diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 6844f30163..b92462276a 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -11,22 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use super::establishment::ext::shm::AuthUnicast; -use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; -#[cfg(feature = "transport_auth")] -use crate::unicast::establishment::ext::auth::Auth; -#[cfg(feature = "transport_multilink")] -use crate::unicast::establishment::ext::multilink::MultiLink; -use crate::{ - unicast::{ - lowlatency::transport::TransportUnicastLowlatency, - transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, - universal::transport::TransportUnicastUniversal, - TransportConfigUnicast, TransportUnicast, - }, - TransportManager, TransportPeer, -}; use std::{ collections::HashMap, sync::{ @@ -35,6 +19,7 @@ use std::{ }, time::Duration, }; + use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; @@ -52,6 +37,23 @@ use zenoh_result::{bail, zerror, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::reader::SharedMemoryReader; +#[cfg(feature = 
"shared-memory")] +use super::establishment::ext::shm::AuthUnicast; +use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; +#[cfg(feature = "transport_auth")] +use crate::unicast::establishment::ext::auth::Auth; +#[cfg(feature = "transport_multilink")] +use crate::unicast::establishment::ext::multilink::MultiLink; +use crate::{ + unicast::{ + lowlatency::transport::TransportUnicastLowlatency, + transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, + universal::transport::TransportUnicastUniversal, + TransportConfigUnicast, TransportUnicast, + }, + TransportManager, TransportPeer, +}; + /*************************************/ /* TRANSPORT CONFIG */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 630b56aa1b..1726ba2559 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -21,26 +21,28 @@ pub(crate) mod universal; #[cfg(feature = "test")] pub mod test_helpers; -#[cfg(feature = "shared-memory")] -use crate::shm::TransportShmConfig; - -use self::transport_unicast_inner::TransportUnicastTrait; +use std::{ + fmt, + sync::{Arc, Weak}, +}; -use super::{TransportPeer, TransportPeerEventHandler}; #[cfg(feature = "transport_multilink")] use establishment::ext::auth::ZPublicKey; pub use manager::*; -use std::fmt; -use std::sync::{Arc, Weak}; use zenoh_core::zcondfeat; use zenoh_link::Link; -use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::{ core::{Bits, WhatAmI, ZenohId}, + network::NetworkMessage, transport::{close, TransportSn}, }; use zenoh_result::{zerror, ZResult}; +use self::transport_unicast_inner::TransportUnicastTrait; +use super::{TransportPeer, TransportPeerEventHandler}; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; + /*************************************/ /* TRANSPORT UNICAST */ /*************************************/ diff --git 
a/io/zenoh-transport/src/unicast/test_helpers.rs b/io/zenoh-transport/src/unicast/test_helpers.rs index 42ed6db927..6d25ae0d77 100644 --- a/io/zenoh-transport/src/unicast/test_helpers.rs +++ b/io/zenoh-transport/src/unicast/test_helpers.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; use zenoh_core::zcondfeat; +use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; + pub fn make_transport_manager_builder( #[cfg(feature = "transport_multilink")] max_links: usize, #[cfg(feature = "shared-memory")] with_shm: bool, diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index f6dc39529d..fcc5d41029 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -12,12 +12,9 @@ // ZettaScale Zenoh Team, // -use crate::{ - unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, - TransportPeerEventHandler, -}; -use async_trait::async_trait; use std::{fmt::DebugStruct, sync::Arc, time::Duration}; + +use async_trait::async_trait; use tokio::sync::MutexGuard as AsyncMutexGuard; use zenoh_link::Link; use zenoh_protocol::{ @@ -28,6 +25,10 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use super::link::{LinkUnicastWithOpenAck, MaybeOpenAck}; +use crate::{ + unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, + TransportPeerEventHandler, +}; pub(crate) type LinkError = (zenoh_result::Error, TransportLinkUnicast, u8); pub(crate) type TransportError = (zenoh_result::Error, Arc, u8); diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 8d5d703be1..e0c3cd3db5 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -11,6 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // 
+use std::time::Duration; + +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_buffers::ZSliceBuffer; +use zenoh_protocol::transport::{KeepAlive, TransportMessage}; +use zenoh_result::{zerror, ZResult}; +use zenoh_sync::{RecyclingObject, RecyclingObjectPool}; +#[cfg(feature = "stats")] +use {crate::common::stats::TransportStats, std::sync::Arc}; + use super::transport::TransportUnicastUniversal; use crate::{ common::{ @@ -23,14 +33,6 @@ use crate::{ }, unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx, TransportLinkUnicastTx}, }; -use std::time::Duration; -use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh_buffers::ZSliceBuffer; -use zenoh_protocol::transport::{KeepAlive, TransportMessage}; -use zenoh_result::{zerror, ZResult}; -use zenoh_sync::{RecyclingObject, RecyclingObjectPool}; -#[cfg(feature = "stats")] -use {crate::common::stats::TransportStats, std::sync::Arc}; #[derive(Clone)] pub(super) struct TransportLinkUnicastUniversal { diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 3edf57f507..f97f29b0c7 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -11,16 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastUniversal; -use crate::{ - common::{ - batch::{Decode, RBatch}, - priority::TransportChannelRx, - }, - unicast::transport_unicast_inner::TransportUnicastTrait, - TransportPeerEventHandler, -}; use std::sync::MutexGuard; + use zenoh_core::{zlock, zread}; use zenoh_link::Link; use zenoh_protocol::{ @@ -30,6 +22,16 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::transport::TransportUnicastUniversal; +use crate::{ + common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, + }, + unicast::transport_unicast_inner::TransportUnicastTrait, + TransportPeerEventHandler, +}; + 
/*************************************/ /* TRANSPORT RX */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 5f581673e9..52b4769e82 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -11,6 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + fmt::DebugStruct, + sync::{Arc, RwLock}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{Priority, WhatAmI, ZenohId}, + network::NetworkMessage, + transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, +}; +use zenoh_result::{bail, zerror, ZResult}; + #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -23,19 +40,6 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_trait::async_trait; -use std::fmt::DebugStruct; -use std::sync::{Arc, RwLock}; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; -use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; -use zenoh_link::Link; -use zenoh_protocol::{ - core::{Priority, WhatAmI, ZenohId}, - network::NetworkMessage, - transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, -}; -use zenoh_result::{bail, zerror, ZResult}; macro_rules! 
zlinkget { ($guard:expr, $link:expr) => { diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index a381bb4d29..f7754489ef 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastUniversal; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +use super::transport::TransportUnicastUniversal; #[cfg(feature = "shared-memory")] use crate::shm::map_zmsg_to_partner; diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index daf79d3e98..e765165a81 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{any::Any, convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::{EndPoint, Link}; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index d5eb62c961..124dfeaad8 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -24,6 +24,7 @@ mod tests { }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index d0bb603836..e1d5bfc52c 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -25,6 +25,7 @@ mod tests { }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index a859a1c0c9..4ace68a87b 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ 
b/io/zenoh-transport/tests/transport_whitelist.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{any::Any, convert::TryFrom, iter::FromIterator, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index abcf011eed..b25fb77a63 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{any::Any, sync::Arc, time::Duration}; + use zenoh_core::{zasyncwrite, ztimeout}; use zenoh_link::Link; use zenoh_protocol::{ @@ -20,11 +21,9 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - multicast::TransportMulticast, unicast::establishment::ext::auth::Auth, - TransportMulticastEventHandler, -}; -use zenoh_transport::{ - unicast::TransportUnicast, DummyTransportPeerEventHandler, TransportEventHandler, + multicast::TransportMulticast, + unicast::{establishment::ext::auth::Auth, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 6f80e7dd58..df122eeedd 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -13,16 +13,17 @@ // #[cfg(feature = "transport_compression")] mod tests { - use std::fmt::Write as _; use std::{ any::Any, convert::TryFrom, + fmt::Write as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index dc4c0fbd3d..9c9b58acde 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs 
+++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -10,11 +10,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio::sync::Barrier; use zenoh_core::ztimeout; use zenoh_link::Link; diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 40a513b874..28b085ab39 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_protocol::{ core::{ diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 14670bf532..9830820cf1 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -11,12 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::io::Write; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + io::Write, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index c06485fd06..d9337b790d 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -14,6 +14,7 @@ #[cfg(feature = "transport_multilink")] mod tests { use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use 
zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 3c46fc9a80..03af046a3d 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; @@ -22,7 +23,6 @@ use zenoh_transport::{ DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; - #[cfg(target_os = "linux")] #[cfg(any(feature = "transport_tcp", feature = "transport_udp"))] use zenoh_util::net::get_ipv4_ipaddrs; diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index fa7f68a8a9..c7e468b5c5 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -11,15 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::fmt::Write as _; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + fmt::Write as _, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use zenoh_core::ztimeout; use zenoh_link::Link; -use zenoh_protocol::network::NetworkBody; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, network::{ @@ -27,7 +31,7 @@ use zenoh_protocol::{ ext::{NodeIdType, QoSType}, Push, }, - NetworkMessage, + NetworkBody, NetworkMessage, }, zenoh::Put, }; diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 637f9f8a86..f7b884f6b9 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ 
b/io/zenoh-transport/tests/unicast_shm.rs @@ -22,6 +22,7 @@ mod tests { }, time::Duration, }; + use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::ztimeout; use zenoh_link::Link; diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 92267458f0..8f9b23a6f1 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -13,11 +13,16 @@ // #[cfg(target_family = "unix")] mod tests { - use std::any::Any; - use std::convert::TryFrom; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::Duration; + use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 5b9209ada3..efe8842c12 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -16,6 +16,7 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 4b833bc5e7..8fed09e8c2 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -11,16 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::fmt::Write as _; use std::{ any::Any, convert::TryFrom, + fmt::Write as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ @@ -1158,6 +1159,7 @@ async fn transport_unicast_tls_only_mutual_success() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { 
use std::vec; + use zenoh_link::tls::config::*; zenoh_util::try_init_log_from_env(); @@ -1373,6 +1375,7 @@ async fn transport_unicast_quic_only_mutual_success() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { use std::vec; + use zenoh_link::quic::config::*; zenoh_util::try_init_log_from_env(); diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 3663f3249e..13cc427268 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::collections::{hash_map::Entry, HashMap}; + use async_std::sync::RwLock; use async_trait::async_trait; -use std::collections::{hash_map::Entry, HashMap}; use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 096255fb59..9f5e9bb25a 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -11,13 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{convert::TryFrom, time::Duration}; + use const_format::concatcp; use derive_more::{AsMut, AsRef}; use schemars::JsonSchema; use serde_json::{Map, Value}; -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::{core::Result as ZResult, key_expr::keyexpr, key_expr::OwnedKeyExpr}; +use zenoh::{ + core::Result as ZResult, + key_expr::{keyexpr, OwnedKeyExpr}, +}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 761f653064..61d70b28b1 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -123,10 +123,12 @@ 
use async_trait::async_trait; use const_format::concatcp; -use zenoh::core::Result as ZResult; -use zenoh::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh::time::Timestamp; -use zenoh::value::Value; +use zenoh::{ + core::Result as ZResult, + key_expr::{keyexpr, OwnedKeyExpr}, + time::Timestamp, + value::Value, +}; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 3c84e039a8..41e88fb417 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -13,20 +13,25 @@ // #![recursion_limit = "256"] -use futures::select; -use std::borrow::Cow; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::{ - atomic::{AtomicBool, Ordering::Relaxed}, - Arc, Mutex, +use std::{ + borrow::Cow, + collections::HashMap, + convert::TryFrom, + sync::{ + atomic::{AtomicBool, Ordering::Relaxed}, + Arc, Mutex, + }, }; + +use futures::select; use tracing::{debug, info}; -use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::runtime::Runtime; -use zenoh::sample::Sample; -use zenoh::session::SessionDeclarations; +use zenoh::{ + key_expr::{keyexpr, KeyExpr}, + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + sample::Sample, + session::SessionDeclarations, +}; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 59562391ea..c1c8f69ce7 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -11,14 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{arg, Command}; use std::time::Duration; -use 
zenoh::config::Config; -use zenoh::core::try_init_log_from_env; -use zenoh::key_expr::keyexpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; + +use clap::{arg, Command}; +use zenoh::{ + config::Config, core::try_init_log_from_env, key_expr::keyexpr, publication::CongestionControl, + sample::QoSBuilderTrait, session::SessionDeclarations, +}; const HTML: &str = r#"
diff --git a/plugins/zenoh-plugin-rest/src/config.rs b/plugins/zenoh-plugin-rest/src/config.rs index 56b9960467..719dc79fbf 100644 --- a/plugins/zenoh-plugin-rest/src/config.rs +++ b/plugins/zenoh-plugin-rest/src/config.rs @@ -11,11 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use schemars::JsonSchema; -use serde::de::{Unexpected, Visitor}; -use serde::{de, Deserialize, Deserializer}; use std::fmt; +use schemars::JsonSchema; +use serde::{ + de, + de::{Unexpected, Visitor}, + Deserialize, Deserializer, +}; + const DEFAULT_HTTP_INTERFACE: &str = "[::]"; #[derive(JsonSchema, Deserialize, serde::Serialize, Clone, Debug)] diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c712a1add6..4dd30f9a5f 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -17,29 +17,27 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) +use std::{borrow::Cow, convert::TryFrom, str::FromStr, sync::Arc}; + use async_std::prelude::FutureExt; use base64::Engine; use futures::StreamExt; use http_types::Method; use serde::{Deserialize, Serialize}; -use std::borrow::Cow; -use std::convert::TryFrom; -use std::str::FromStr; -use std::sync::Arc; -use tide::http::Mime; -use tide::sse::Sender; -use tide::{Request, Response, Server, StatusCode}; -use zenoh::bytes::{StringOrBase64, ZBytes}; -use zenoh::core::try_init_log_from_env; -use zenoh::encoding::Encoding; -use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::query::{QueryConsolidation, Reply}; -use zenoh::runtime::Runtime; -use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; -use zenoh::selector::{Selector, TIME_RANGE_KEY}; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh::value::Value; +use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; +use zenoh::{ + bytes::{StringOrBase64, 
ZBytes}, + core::try_init_log_from_env, + encoding::Encoding, + key_expr::{keyexpr, KeyExpr}, + plugins::{RunningPluginTrait, ZenohPlugin}, + query::{QueryConsolidation, Reply}, + runtime::Runtime, + sample::{Sample, SampleKind, ValueBuilderTrait}, + selector::{Selector, TIME_RANGE_KEY}, + session::{Session, SessionDeclarations}, + value::Value, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index dcdefda406..1bb8af4330 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -11,13 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::storages_mgt::*; -use flume::Sender; use std::sync::Arc; -use zenoh::core::Result as ZResult; -use zenoh::session::Session; -use zenoh_backend_traits::config::StorageConfig; -use zenoh_backend_traits::{Capability, VolumeInstance}; + +use flume::Sender; +use zenoh::{core::Result as ZResult, session::Session}; +use zenoh_backend_traits::{config::StorageConfig, Capability, VolumeInstance}; + +use super::storages_mgt::*; pub struct StoreIntercept { pub storage: Box, diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 8818d44688..3415f6db65 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -19,34 +19,32 @@ //! 
[Click here for Zenoh's documentation](../zenoh/index.html) #![recursion_limit = "512"] +use std::{ + collections::HashMap, + convert::TryFrom, + sync::{Arc, Mutex}, +}; + use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::Arc; -use std::sync::Mutex; use storages_mgt::StorageMessage; -use zenoh::core::try_init_log_from_env; -use zenoh::core::Result as ZResult; -use zenoh::internal::zlock; -use zenoh::internal::LibLoader; -use zenoh::key_expr::keyexpr; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::runtime::Runtime; -use zenoh::selector::Selector; -use zenoh::session::Session; -use zenoh_backend_traits::config::ConfigDiff; -use zenoh_backend_traits::config::PluginConfig; -use zenoh_backend_traits::config::StorageConfig; -use zenoh_backend_traits::config::VolumeConfig; -use zenoh_backend_traits::VolumeInstance; -use zenoh_plugin_trait::plugin_long_version; -use zenoh_plugin_trait::plugin_version; -use zenoh_plugin_trait::Plugin; -use zenoh_plugin_trait::PluginControl; -use zenoh_plugin_trait::PluginReport; -use zenoh_plugin_trait::PluginStatusRec; +use zenoh::{ + core::{try_init_log_from_env, Result as ZResult}, + internal::{zlock, LibLoader}, + key_expr::keyexpr, + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + selector::Selector, + session::Session, +}; +use zenoh_backend_traits::{ + config::{ConfigDiff, PluginConfig, StorageConfig, VolumeConfig}, + VolumeInstance, +}; +use zenoh_plugin_trait::{ + plugin_long_version, plugin_version, Plugin, PluginControl, PluginReport, PluginStatusRec, +}; mod backends_mgt; use backends_mgt::*; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 60982c350d..1534d95e32 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -11,16 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{collections::HashMap, sync::Arc}; + use async_std::sync::RwLock; use async_trait::async_trait; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh::core::Result as ZResult; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; -use zenoh::value::Value; -use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; -use zenoh_backend_traits::*; +use zenoh::{core::Result as ZResult, key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; +use zenoh_backend_traits::{ + config::{StorageConfig, VolumeConfig}, + *, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; use crate::MEMORY_BACKEND_NAME; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 694e259a18..50c93fe3dd 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -11,15 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::digest::*; -use super::Snapshotter; +use std::{ + cmp::Ordering, + collections::{BTreeSet, HashMap, HashSet}, + str, + str::FromStr, +}; + use async_std::sync::Arc; -use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str; -use std::str::FromStr; use zenoh::prelude::*; +use super::{digest::*, Snapshotter}; + pub struct AlignQueryable { session: Arc, digest_key: OwnedKeyExpr, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 46ccdc2935..e0301f1a4e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,14 +12,17 @@ // ZettaScale Zenoh Team, // -use super::{Digest, EraType, 
LogEntry, Snapshotter}; -use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; +use std::{ + collections::{HashMap, HashSet}, + str, +}; + use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; -use std::collections::{HashMap, HashSet}; -use std::str; use zenoh::prelude::*; +use super::{Digest, EraType, LogEntry, Snapshotter, CONTENTS, ERA, INTERVALS, SUBINTERVALS}; + pub struct Aligner { session: Arc, digest_key: OwnedKeyExpr, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index c70f26ea1f..bf06c61f25 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -12,16 +12,18 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + convert::TryFrom, + str::FromStr, + string::ParseError, + time::Duration, +}; + use crc::{Crc, CRC_64_ECMA_182}; use derive_new::new; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::convert::TryFrom; -use std::str::FromStr; -use std::string::ParseError; -use std::time::Duration; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp}; #[derive(Eq, PartialEq, Clone, Debug, Deserialize, Serialize)] pub struct DigestConfig { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index c9d9e03bcf..421d45ade6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -14,21 +14,25 @@ // This module extends Storage with alignment protocol that aligns storages subscribing to the same key_expr -use crate::backends_mgt::StoreIntercept; -use crate::storages_mgt::StorageMessage; -use async_std::stream::{interval, StreamExt}; -use async_std::sync::Arc; -use 
async_std::sync::RwLock; +use std::{ + collections::{HashMap, HashSet}, + str, + str::FromStr, + time::{Duration, SystemTime}, +}; + +use async_std::{ + stream::{interval, StreamExt}, + sync::{Arc, RwLock}, +}; use flume::{Receiver, Sender}; use futures::{pin_mut, select, FutureExt}; -use std::collections::{HashMap, HashSet}; -use std::str; -use std::str::FromStr; -use std::time::{Duration, SystemTime}; use urlencoding::encode; use zenoh::prelude::*; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; +use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; + pub mod align_queryable; pub mod aligner; pub mod digest; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index e66a6e88ca..d5708686ee 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -11,20 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Digest, DigestConfig, LogEntry}; -use async_std::stream::{interval, StreamExt}; -use async_std::sync::Arc; -use async_std::sync::RwLock; -use async_std::task::sleep; +use std::{ + collections::{HashMap, HashSet}, + convert::TryFrom, + time::Duration, +}; + +use async_std::{ + stream::{interval, StreamExt}, + sync::{Arc, RwLock}, + task::sleep, +}; use flume::Receiver; use futures::join; -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp}; use zenoh_backend_traits::config::ReplicaConfig; +use super::{Digest, DigestConfig, LogEntry}; + pub struct Snapshotter { // channel to get updates from the storage storage_update: Receiver<(OwnedKeyExpr, Timestamp)>, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 476893539e..bd7d56f7fc 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -11,35 +11,39 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::backends_mgt::StoreIntercept; -use crate::storages_mgt::StorageMessage; -use async_std::sync::Arc; -use async_std::sync::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + str::{self, FromStr}, + time::{SystemTime, UNIX_EPOCH}, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; -use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; -use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::SplitBuffer; -use zenoh::buffers::ZBuf; -use zenoh::internal::bail; -use zenoh::internal::{zenoh_home, Timed, TimedEvent, Timer}; -use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; -use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; -use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; -use zenoh::key_expr::KeyExpr; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; -use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; -use zenoh::selector::Selector; -use zenoh::session::SessionDeclarations; -use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::value::Value; -use zenoh::{core::Result as ZResult, session::Session}; -use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh::{ + buffers::{SplitBuffer, ZBuf}, + core::Result as ZResult, + internal::{bail, zenoh_home, Timed, TimedEvent, Timer}, + key_expr::{ + keyexpr_tree::{ + IKeyExprTree, IKeyExprTreeMut, KeBoxTree, 
KeyedSetProvider, NonWild, UnknownWildness, + }, + KeyExpr, OwnedKeyExpr, + }, + query::{ConsolidationMode, QueryTarget}, + sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait, ValueBuilderTrait}, + selector::Selector, + session::{Session, SessionDeclarations}, + time::{new_reception_timestamp, Timestamp, NTP64}, + value::Value, +}; +use zenoh_backend_traits::{ + config::{GarbageCollectionConfig, StorageConfig}, + Capability, History, Persistence, StorageInsertionResult, StoredData, +}; + +use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index a77cdd936f..1670310fcf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,8 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; -use zenoh::core::Result as ZResult; -use zenoh::session::Session; +use zenoh::{core::Result as ZResult, session::Session}; use zenoh_backend_traits::config::StorageConfig; pub use super::replica::{Replica, StorageService}; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 61ea53deba..dd20c71936 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -16,12 +16,10 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. 
check for dealing with out of order updates -use std::str::FromStr; -use std::thread::sleep; +use std::{str::FromStr, thread::sleep}; use async_std::task; -use zenoh::internal::zasync_executor_init; -use zenoh::prelude::*; +use zenoh::{internal::zasync_executor_init, prelude::*}; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index f2482da8e5..8bafeb9bbe 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -16,13 +16,11 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. check for dealing with out of order updates -use std::str::FromStr; -use std::thread::sleep; +use std::{str::FromStr, thread::sleep}; // use std::collections::HashMap; use async_std::task; -use zenoh::internal::zasync_executor_init; -use zenoh::prelude::*; +use zenoh::{internal::zasync_executor_init, prelude::*}; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs index a205c3972d..2f5336d1fc 100644 --- a/plugins/zenoh-plugin-trait/src/manager.rs +++ b/plugins/zenoh-plugin-trait/src/manager.rs @@ -13,7 +13,6 @@ mod dynamic_plugin; mod static_plugin; -use crate::*; use zenoh_keyexpr::keyexpr; use zenoh_result::ZResult; use zenoh_util::LibLoader; @@ -22,6 +21,7 @@ use self::{ dynamic_plugin::{DynamicPlugin, DynamicPluginSource}, static_plugin::StaticPlugin, }; +use crate::*; pub trait DeclaredPlugin: PluginStatus { fn as_status(&self) -> &dyn PluginStatus; diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs index 
a8a78306ea..89a0032fc1 100644 --- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs @@ -10,13 +10,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::*; use std::path::{Path, PathBuf}; use libloading::Library; use zenoh_result::{bail, ZResult}; use zenoh_util::LibLoader; +use crate::*; + /// This enum contains information where to load the plugin from. pub enum DynamicPluginSource { /// Load plugin with the name in String + `.so | .dll | .dylib` diff --git a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs index c275fb9818..3841f50a86 100644 --- a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::*; use std::marker::PhantomData; + use zenoh_result::ZResult; +use crate::*; + pub struct StaticPlugin where P: Plugin, diff --git a/plugins/zenoh-plugin-trait/src/plugin.rs b/plugins/zenoh-plugin-trait/src/plugin.rs index 6911d614d5..703f4fb0b1 100644 --- a/plugins/zenoh-plugin-trait/src/plugin.rs +++ b/plugins/zenoh-plugin-trait/src/plugin.rs @@ -11,12 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::StructVersion; -use serde::{Deserialize, Serialize}; use std::{borrow::Cow, ops::BitOrAssign}; + +use serde::{Deserialize, Serialize}; use zenoh_keyexpr::keyexpr; use zenoh_result::ZResult; +use crate::StructVersion; + /// The plugin can be in one of these states: /// - Declared: the plugin is declared in the configuration file, but not loaded yet or failed to load /// - Loaded: the plugin is loaded, but not started yet or failed to start diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 35513b1b56..783ee97a9e 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -11,9 
+11,9 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{sync::Arc, time::Duration}; + use futures::StreamExt; -use std::sync::Arc; -use std::time::Duration; use zenoh::prelude::*; use zenoh_ext::group::*; diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 09c888cb0b..684cc7cb75 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -11,10 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{arg, Parser}; use std::time::Duration; -use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::*; + +use clap::{arg, Parser}; +use zenoh::{ + config::{Config, ModeDependentValue}, + prelude::*, +}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index a735ecec66..2fa077eba1 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -11,10 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::arg; -use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::*; +use clap::{arg, Parser}; +use zenoh::{config::Config, prelude::*}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index 52e78790bb..fd8220d506 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{sync::Arc, time::Duration}; + use clap::{arg, Parser}; -use std::sync::Arc; -use std::time::Duration; use zenoh::config::Config; use zenoh_ext::group::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index d764e5ed9c..44600b038c 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -14,18 +14,22 @@ //! 
To manage groups and group memeberships +use std::{ + collections::HashMap, + convert::TryInto, + ops::Add, + sync::Arc, + time::{Duration, Instant}, +}; + use flume::{Receiver, Sender}; -use futures::prelude::*; -use futures::select; +use futures::{prelude::*, select}; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::convert::TryInto; -use std::ops::Add; -use std::sync::Arc; -use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::internal::{bail, Condition, TaskController}; -use zenoh::prelude::*; +use zenoh::{ + internal::{bail, Condition, TaskController}, + prelude::*, +}; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 41eea0b074..9802d04e3a 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -21,11 +21,8 @@ pub use querying_subscriber::{ FetchingSubscriber, FetchingSubscriberBuilder, QueryingSubscriberBuilder, }; pub use session_ext::SessionExt; -pub use subscriber_ext::SubscriberBuilderExt; -pub use subscriber_ext::SubscriberForward; -use zenoh::internal::zerror; -use zenoh::query::Reply; -use zenoh::{core::Result as ZResult, sample::Sample}; +pub use subscriber_ext::{SubscriberBuilderExt, SubscriberForward}; +use zenoh::{core::Result as ZResult, internal::zerror, query::Reply, sample::Sample}; /// The space of keys to use in a [`FetchingSubscriber`]. 
pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 11fb8fb72a..1796668f1c 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -11,21 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::collections::{HashMap, VecDeque}; -use std::convert::TryInto; -use std::future::{IntoFuture, Ready}; -use std::time::Duration; -use zenoh::core::Error; -use zenoh::core::{Resolvable, Resolve}; -use zenoh::internal::{ResolveFuture, TerminatableTask}; -use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; -use zenoh::prelude::Wait; -use zenoh::queryable::{Query, Queryable}; -use zenoh::runtime::ZRuntime; -use zenoh::sample::{Locality, Sample}; -use zenoh::session::{SessionDeclarations, SessionRef}; -use zenoh::subscriber::FlumeSubscriber; -use zenoh::{core::Result as ZResult, internal::bail}; +use std::{ + collections::{HashMap, VecDeque}, + convert::TryInto, + future::{IntoFuture, Ready}, + time::Duration, +}; + +use zenoh::{ + core::{Error, Resolvable, Resolve, Result as ZResult}, + internal::{bail, ResolveFuture, TerminatableTask}, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, + prelude::Wait, + queryable::{Query, Queryable}, + runtime::ZRuntime, + sample::{Locality, Sample}, + session::{SessionDeclarations, SessionRef}, + subscriber::FlumeSubscriber, +}; /// The builder of PublicationCache, allowing to configure it. 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 6febef7395..e76c6f7f5c 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -11,24 +11,28 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::collections::{btree_map, BTreeMap, VecDeque}; -use std::convert::TryInto; -use std::future::{IntoFuture, Ready}; -use std::mem::swap; -use std::sync::{Arc, Mutex}; -use std::time::Duration; -use zenoh::core::{Resolvable, Resolve}; -use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; -use zenoh::internal::zlock; -use zenoh::key_expr::KeyExpr; -use zenoh::prelude::Wait; -use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; -use zenoh::selector::Selector; -use zenoh::session::{SessionDeclarations, SessionRef}; -use zenoh::subscriber::{Reliability, Subscriber}; -use zenoh::time::{new_reception_timestamp, Timestamp}; -use zenoh::{core::Error, core::Result as ZResult}; +use std::{ + collections::{btree_map, BTreeMap, VecDeque}, + convert::TryInto, + future::{IntoFuture, Ready}, + mem::swap, + sync::{Arc, Mutex}, + time::Duration, +}; + +use zenoh::{ + core::{Error, Resolvable, Resolve, Result as ZResult}, + handlers::{locked, DefaultHandler, IntoHandler}, + internal::zlock, + key_expr::KeyExpr, + prelude::Wait, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, + selector::Selector, + session::{SessionDeclarations, SessionRef}, + subscriber::{Reliability, Subscriber}, + time::{new_reception_timestamp, Timestamp}, +}; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index d005cafc86..3b33bc9b16 100644 --- a/zenoh-ext/src/session_ext.rs +++ 
b/zenoh-ext/src/session_ext.rs @@ -11,15 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::PublicationCacheBuilder; -use std::convert::TryInto; -use std::sync::Arc; +use std::{convert::TryInto, sync::Arc}; + use zenoh::{ core::Error, key_expr::KeyExpr, session::{Session, SessionRef}, }; +use super::PublicationCacheBuilder; + /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { fn declare_publication_cache<'b, 'c, TryIntoKeyExpr>( diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 8c3b1239b6..81c969a223 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -11,21 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; -use std::time::Duration; -use zenoh::core::Result as ZResult; -use zenoh::query::ReplyKeyExpr; -use zenoh::sample::Locality; use zenoh::{ + core::Result as ZResult, liveliness::LivelinessSubscriberBuilder, - query::{QueryConsolidation, QueryTarget}, - sample::Sample, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + sample::{Locality, Sample}, subscriber::{Reliability, Subscriber, SubscriberBuilder}, }; -use crate::ExtractSample; -use crate::{querying_subscriber::QueryingSubscriberBuilder, FetchingSubscriberBuilder}; +use crate::{ + querying_subscriber::QueryingSubscriberBuilder, ExtractSample, FetchingSubscriberBuilder, +}; /// Allows writing `subscriber.forward(receiver)` instead of `subscriber.stream().map(Ok).forward(publisher)` pub trait SubscriberForward<'a, S> { diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index e720fde1c3..6e7605e95b 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,20 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - bytes::ZBytes, - encoding::Encoding, - key_expr::KeyExpr, - queryable::Query, - sample::Locality, - sample::{DataInfo, 
SampleKind}, - session::Session, -}; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, sync::Arc, }; + use zenoh_core::{Result as ZResult, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; @@ -32,6 +24,15 @@ use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use super::{ + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + queryable::Query, + sample::{DataInfo, Locality, SampleKind}, + session::Session, +}; + macro_rules! ke_for_sure { ($val:expr) => { unsafe { keyexpr::from_str_unchecked($val) } diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 5285825b29..d4dc1b54d2 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -11,24 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::builders::sample::SampleBuilderTrait; -use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; +use std::future::{IntoFuture, Ready}; + +use zenoh_core::{Resolvable, Result as ZResult, Wait}; +use zenoh_protocol::{core::CongestionControl, network::Mapping}; + #[cfg(feature = "unstable")] use crate::api::bytes::OptionZBytes; -use crate::api::bytes::ZBytes; -use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; -use crate::api::sample::Locality; -use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; -use crate::api::session::SessionRef; -use crate::api::value::Value; -use crate::api::{encoding::Encoding, publication::Publisher}; -use std::future::{IntoFuture, Ready}; -use zenoh_core::{Resolvable, Result as ZResult, Wait}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::network::Mapping; +use crate::api::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }, 
+ bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + publication::{Priority, Publisher}, + sample::{Locality, SampleKind}, + session::SessionRef, + value::Value, +}; pub type SessionPutBuilder<'a, 'b> = PublicationBuilder, PublicationBuilderPut>; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 0335949b82..56ae8c6c1b 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -11,22 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::bytes::ZBytes; -use crate::api::encoding::Encoding; -use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; -use crate::api::sample::QoS; -use crate::api::sample::QoSBuilder; -use crate::api::sample::Sample; -use crate::api::sample::SampleKind; -use crate::api::value::Value; -#[cfg(feature = "unstable")] -use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; use std::marker::PhantomData; + use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; +use crate::api::{ + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + publication::Priority, + sample::{QoS, QoSBuilder, Sample, SampleKind}, + value::Value, +}; +#[cfg(feature = "unstable")] +use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; + pub trait QoSBuilderTrait { /// Change the `congestion_control` to apply when routing the data. fn congestion_control(self, congestion_control: CongestionControl) -> Self; diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index c36136ef81..fb32910b54 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -13,11 +13,11 @@ // //! ZBytes primitives. 
-use crate::buffers::ZBuf; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, string::FromUtf8Error, sync::Arc, }; + use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, @@ -37,6 +37,8 @@ use zenoh_shm::{ SharedMemoryBuf, }; +use crate::buffers::ZBuf; + /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { type Output; @@ -1825,12 +1827,11 @@ impl From> for ZBytes { mod tests { #[test] fn serializer() { - use super::ZBytes; - use rand::Rng; use std::borrow::Cow; + + use rand::Rng; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::Properties; - #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ protocol_implementations::posix::{ @@ -1841,6 +1842,8 @@ mod tests { slice::zsliceshm::{zsliceshm, ZSliceShm}, }; + use super::ZBytes; + const NUM: usize = 1_000; macro_rules! serialize_deserialize { diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 6c08303612..f1be92c7ac 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::bytes::ZBytes; -use phf::phf_map; use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; + +use phf::phf_map; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; +use super::bytes::ZBytes; + /// Default encoding values used by Zenoh. /// /// An encoding has a similar role to Content-type in HTTP: it indicates, when present, how data should be interpreted by the application. diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 755c6d9bce..77ad867d36 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -13,13 +13,14 @@ // //! Callback handler trait. 
-use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; - -use super::{callback::Callback, Dyn, IntoHandler}; use std::sync::{Arc, Weak}; + use zenoh_collections::RingBuffer; use zenoh_result::ZResult; +use super::{callback::Callback, Dyn, IntoHandler}; +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; + /// A synchrounous ring channel with a limited size that allows users to keep the last N data. pub struct RingChannel { capacity: usize, diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 205a412142..0c75252a78 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -13,11 +13,13 @@ // //! Tools to access information about the current zenoh [`Session`](crate::Session). -use super::session::SessionRef; use std::future::{IntoFuture, Ready}; + use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::core::{WhatAmI, ZenohId}; +use super::session::SessionRef; + /// A builder retuned by [`SessionInfo::zid()`](SessionInfo::zid) that allows /// to access the [`ZenohId`] of the current zenoh [`Session`](crate::Session). 
/// diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 20dcf9cbee..c5fdf12609 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -12,14 +12,12 @@ // ZettaScale Zenoh Team, // -use super::session::{Session, Undeclarable}; -use crate::net::primitives::Primitives; -use std::future::IntoFuture; use std::{ convert::{TryFrom, TryInto}, - future::Ready, + future::{IntoFuture, Ready}, str::FromStr, }; + use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ @@ -28,6 +26,9 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +use super::session::{Session, Undeclarable}; +use crate::net::primitives::Primitives; + #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { Borrowed(&'a keyexpr), diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index f7235426c3..640c639dec 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -12,6 +12,18 @@ // ZettaScale Zenoh Team, // +use std::{ + convert::TryInto, + future::{IntoFuture, Ready}, + sync::Arc, + time::Duration, +}; + +use zenoh_config::unwrap_or_default; +use zenoh_core::{Resolvable, Resolve, Result as ZResult, Wait}; +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; + use super::{ handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, @@ -21,13 +33,6 @@ use super::{ subscriber::{Subscriber, SubscriberInner}, Id, }; -use std::future::IntoFuture; -use std::{convert::TryInto, future::Ready, sync::Arc, time::Duration}; -use zenoh_config::unwrap_or_default; -use zenoh_core::{Resolvable, Result as ZResult}; -use zenoh_core::{Resolve, Wait}; -use zenoh_keyexpr::keyexpr; -use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/loader.rs b/zenoh/src/api/loader.rs 
index e4a28de02e..ad4dac61fb 100644 --- a/zenoh/src/api/loader.rs +++ b/zenoh/src/api/loader.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::plugins::{PluginsManager, PLUGIN_PREFIX}; -use crate::runtime::Runtime; use zenoh_config::{Config, PluginLoad}; use zenoh_result::ZResult; +use super::plugins::{PluginsManager, PLUGIN_PREFIX}; +use crate::runtime::Runtime; + pub(crate) fn load_plugin( plugin_mgr: &mut PluginsManager, name: &str, diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 27f6f18d7a..b7f1954a6b 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -14,8 +14,6 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). -use super::selector::Selector; -use crate::net::runtime::Runtime; use zenoh_core::zconfigurable; use zenoh_plugin_trait::{ Plugin, PluginControl, PluginInstance, PluginReport, PluginStatusRec, StructVersion, @@ -23,6 +21,9 @@ use zenoh_plugin_trait::{ use zenoh_protocol::core::key_expr::keyexpr; use zenoh_result::ZResult; +use super::selector::Selector; +use crate::net::runtime::Runtime; + zconfigurable! 
{ pub static ref PLUGIN_PREFIX: String = "zenoh_plugin_".to_string(); } diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 518ddc4d1b..553170e76a 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -12,26 +12,14 @@ // ZettaScale Zenoh Team, // -use super::{ - builders::publication::{ - PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, - PublisherDeleteBuilder, PublisherPutBuilder, - }, - bytes::ZBytes, - encoding::Encoding, - key_expr::KeyExpr, - sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, - session::{SessionRef, Undeclarable}, -}; -use crate::net::primitives::Primitives; -use futures::Sink; -use std::future::IntoFuture; use std::{ convert::TryFrom, - future::Ready, + future::{IntoFuture, Ready}, pin::Pin, task::{Context, Poll}, }; + +use futures::Sink; use zenoh_core::{zread, Resolvable, Resolve, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{ @@ -40,7 +28,6 @@ use zenoh_protocol::{ zenoh::{Del, PushBody, Put}, }; use zenoh_result::{Error, ZResult}; - #[zenoh_macros::unstable] use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, @@ -50,6 +37,19 @@ use { zenoh_protocol::core::EntityId, }; +use super::{ + builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, + PublisherDeleteBuilder, PublisherPutBuilder, + }, + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, + session::{SessionRef, Undeclarable}, +}; +use crate::net::primitives::Primitives; + #[zenoh_macros::unstable] #[derive(Clone)] pub enum PublisherRef<'a> { @@ -1087,16 +1087,19 @@ impl Drop for MatchingListenerInner<'_> { #[cfg(test)] mod tests { - use crate::api::{sample::SampleKind, session::SessionDeclarations}; use zenoh_config::Config; use zenoh_core::Wait; + use crate::api::{sample::SampleKind, session::SessionDeclarations}; + #[test] fn priority_from() { - use 
super::Priority as APrio; use std::convert::TryInto; + use zenoh_protocol::core::Priority as TPrio; + use super::Priority as APrio; + for i in APrio::MAX as u8..=APrio::MIN as u8 { let p: APrio = i.try_into().unwrap(); diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 311402b618..e344237087 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -12,6 +12,19 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + future::{IntoFuture, Ready}, + time::Duration, +}; + +use zenoh_core::{Resolvable, Wait}; +use zenoh_keyexpr::OwnedKeyExpr; +use zenoh_protocol::core::{CongestionControl, ZenohId}; +use zenoh_result::ZResult; + +#[zenoh_macros::unstable] +use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, bytes::ZBytes, @@ -24,15 +37,6 @@ use super::{ session::Session, value::Value, }; -use std::future::IntoFuture; -use std::{collections::HashMap, future::Ready, time::Duration}; -use zenoh_core::{Resolvable, Wait}; -use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::{CongestionControl, ZenohId}; -use zenoh_result::ZResult; - -#[zenoh_macros::unstable] -use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). 
pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index c83b4b6081..e2343811db 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -11,27 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, - bytes::ZBytes, - encoding::Encoding, - handlers::{locked, DefaultHandler, IntoHandler}, - key_expr::KeyExpr, - publication::Priority, - sample::{Locality, QoSBuilder, Sample, SampleKind}, - selector::{Parameters, Selector}, - session::{SessionRef, Undeclarable}, - value::Value, - Id, -}; -use crate::net::primitives::Primitives; -use std::future::IntoFuture; use std::{ fmt, - future::Ready, + future::{IntoFuture, Ready}, ops::{Deref, DerefMut}, sync::Arc, }; + use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ @@ -40,7 +26,6 @@ use zenoh_protocol::{ zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; use zenoh_result::ZResult; - #[zenoh_macros::unstable] use { super::{ @@ -50,6 +35,21 @@ use { zenoh_protocol::core::EntityGlobalId, }; +use super::{ + builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, + bytes::ZBytes, + encoding::Encoding, + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + publication::Priority, + sample::{Locality, QoSBuilder, Sample, SampleKind}, + selector::{Parameters, Selector}, + session::{SessionRef, Undeclarable}, + value::Value, + Id, +}; +use crate::net::primitives::Primitives; + pub(crate) struct QueryInner { /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index ca2354db85..2551a2a0d9 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,18 +13,20 @@ // //! 
Sample primitives -use super::{ - builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - publication::Priority, value::Value, -}; +use std::{convert::TryFrom, fmt}; + #[cfg(feature = "unstable")] use serde::Serialize; -use std::{convert::TryFrom, fmt}; use zenoh_protocol::{ core::{CongestionControl, EntityGlobalId, Timestamp}, network::declare::ext::QoSType, }; +use super::{ + builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, + publication::Priority, value::Value, +}; + pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. @@ -150,9 +152,10 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] fn source_info_stack_size() { - use crate::api::sample::{SourceInfo, SourceSn}; use zenoh_protocol::core::ZenohId; + use crate::api::sample::{SourceInfo, SourceSn}; + assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); assert_eq!(std::mem::size_of::>(), 16); diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 8e7853a411..8963d37e30 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -11,17 +11,25 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::net::runtime::{orchestrator::Loop, Runtime}; -use std::future::IntoFuture; -use std::time::Duration; -use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; +use std::{ + fmt, + future::{IntoFuture, Ready}, + net::SocketAddr, + ops::Deref, + time::Duration, +}; + use tokio::net::UdpSocket; use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; use zenoh_result::ZResult; use zenoh_task::TerminatableTask; +use crate::{ + api::handlers::{locked, Callback, DefaultHandler, IntoHandler}, + net::runtime::{orchestrator::Loop, Runtime}, +}; + /// A builder for initializing a [`Scout`]. 
/// /// # Examples diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 59e52edc62..2dc77dc967 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -13,13 +13,13 @@ // //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use super::{key_expr::KeyExpr, queryable::Query}; use std::{ collections::HashMap, convert::TryFrom, ops::{Deref, DerefMut}, str::FromStr, }; + use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Properties, @@ -29,6 +29,8 @@ use zenoh_result::ZResult; #[cfg(feature = "unstable")] use zenoh_util::time_range::TimeRange; +use super::{key_expr::KeyExpr, queryable::Query}; + /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters /// with a few intendend uses: diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 703fca2e9d..2e718ecccb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -11,37 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - admin, - builders::publication::{ - PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, - SessionPutBuilder, - }, - bytes::ZBytes, - encoding::Encoding, - handlers::{Callback, DefaultHandler}, - info::SessionInfo, - key_expr::{KeyExpr, KeyExprInner}, - publication::Priority, - query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, - queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, - sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, - selector::{Selector, TIME_RANGE_KEY}, - subscriber::{SubscriberBuilder, SubscriberState}, - value::Value, - Id, -}; -use crate::net::{ - primitives::Primitives, - routing::dispatcher::face::Face, - runtime::{Runtime, RuntimeBuilder}, -}; -use std::future::IntoFuture; use std::{ 
collections::HashMap, convert::{TryFrom, TryInto}, fmt, - future::Ready, + future::{IntoFuture, Ready}, ops::Deref, sync::{ atomic::{AtomicU16, Ordering}, @@ -49,6 +23,7 @@ use std::{ }, time::Duration, }; + use tracing::{error, trace, warn}; use uhlc::HLC; use zenoh_buffers::ZBuf; @@ -82,6 +57,26 @@ use zenoh_result::ZResult; use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; +use super::{ + admin, + builders::publication::{ + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, + SessionPutBuilder, + }, + bytes::ZBytes, + encoding::Encoding, + handlers::{Callback, DefaultHandler}, + info::SessionInfo, + key_expr::{KeyExpr, KeyExprInner}, + publication::Priority, + query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, + sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, + selector::{Selector, TIME_RANGE_KEY}, + subscriber::{SubscriberBuilder, SubscriberState}, + value::Value, + Id, +}; #[cfg(feature = "unstable")] use super::{ liveliness::{Liveliness, LivelinessTokenState}, @@ -90,6 +85,11 @@ use super::{ query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, }; +use crate::net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, +}; zconfigurable! 
{ pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 0c4e21b547..ba345f5116 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -12,26 +12,26 @@ // ZettaScale Zenoh Team, // -use super::{ - handlers::{locked, Callback, DefaultHandler, IntoHandler}, - key_expr::KeyExpr, - sample::{Locality, Sample}, - session::{SessionRef, Undeclarable}, - Id, -}; -use std::future::IntoFuture; use std::{ fmt, - future::Ready, + future::{IntoFuture, Ready}, ops::{Deref, DerefMut}, sync::Arc, }; + use zenoh_core::{Resolvable, Wait}; +#[cfg(feature = "unstable")] +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; use zenoh_result::ZResult; -#[cfg(feature = "unstable")] -use zenoh_protocol::core::EntityGlobalId; +use super::{ + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + sample::{Locality, Sample}, + session::{SessionRef, Undeclarable}, + Id, +}; pub(crate) struct SubscriberState { pub(crate) id: Id, diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index 5d0d06765d..cbdabe3a7e 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::convert::TryFrom; + use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a reception [`Timestamp`] with id=0x01. 
diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 71ab3a72e8..bb2bccb869 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -110,8 +110,7 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ); // Expose some functions directly to root `zenoh::`` namespace for convenience -pub use crate::api::scouting::scout; -pub use crate::api::session::open; +pub use crate::api::{scouting::scout, session::open}; pub mod prelude; @@ -119,27 +118,24 @@ pub mod prelude; pub mod core { #[allow(deprecated)] pub use zenoh_core::AsyncResolve; - pub use zenoh_core::Resolvable; - pub use zenoh_core::Resolve; #[allow(deprecated)] pub use zenoh_core::SyncResolve; - pub use zenoh_core::Wait; + pub use zenoh_core::{Resolvable, Resolve, Wait}; /// A zenoh error. pub use zenoh_result::Error; /// A zenoh result. pub use zenoh_result::ZResult as Result; - pub use zenoh_util::core::zresult::ErrNo; - pub use zenoh_util::try_init_log_from_env; + pub use zenoh_util::{core::zresult::ErrNo, try_init_log_from_env}; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { - pub use zenoh_buffers::buffer::SplitBuffer; - pub use zenoh_buffers::reader::HasReader; - pub use zenoh_buffers::reader::Reader; - pub use zenoh_buffers::ZBufReader; - pub use zenoh_buffers::{ZBuf, ZSlice, ZSliceBuffer}; + pub use zenoh_buffers::{ + buffer::SplitBuffer, + reader::{HasReader, Reader}, + ZBuf, ZBufReader, ZSlice, ZSliceBuffer, + }; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. @@ -174,18 +170,16 @@ pub mod buffers { /// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. 
pub mod key_expr { pub mod keyexpr_tree { - pub use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; pub use zenoh_keyexpr::keyexpr_tree::{ - support::NonWild, support::UnknownWildness, KeBoxTree, + impls::KeyedSetProvider, + support::{NonWild, UnknownWildness}, + IKeyExprTree, IKeyExprTreeMut, KeBoxTree, }; - pub use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; } - pub use crate::api::key_expr::KeyExpr; - pub use crate::api::key_expr::KeyExprUndeclaration; - pub use zenoh_keyexpr::keyexpr; - pub use zenoh_keyexpr::OwnedKeyExpr; - pub use zenoh_keyexpr::SetIntersectionLevel; + pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr, SetIntersectionLevel}; pub use zenoh_macros::{kedefine, keformat, kewrite}; + + pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration}; // keyexpr format macro support pub mod format { pub use zenoh_keyexpr::format::*; @@ -197,20 +191,16 @@ pub mod key_expr { /// Zenoh [`Session`](crate::session::Session) and associated types pub mod session { - pub use crate::api::builders::publication::SessionDeleteBuilder; - pub use crate::api::builders::publication::SessionPutBuilder; #[zenoh_macros::unstable] #[doc(hidden)] pub use crate::api::session::init; - pub use crate::api::session::open; #[zenoh_macros::unstable] #[doc(hidden)] pub use crate::api::session::InitBuilder; - pub use crate::api::session::OpenBuilder; - pub use crate::api::session::Session; - pub use crate::api::session::SessionDeclarations; - pub use crate::api::session::SessionRef; - pub use crate::api::session::Undeclarable; + pub use crate::api::{ + builders::publication::{SessionDeleteBuilder, SessionPutBuilder}, + session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, + }; } /// Tools to access information about the current zenoh [`Session`](crate::Session). 
@@ -223,22 +213,17 @@ pub mod info { /// Sample primitives pub mod sample { - pub use crate::api::builders::sample::QoSBuilderTrait; - pub use crate::api::builders::sample::SampleBuilder; - pub use crate::api::builders::sample::SampleBuilderAny; - pub use crate::api::builders::sample::SampleBuilderDelete; - pub use crate::api::builders::sample::SampleBuilderPut; - pub use crate::api::builders::sample::SampleBuilderTrait; - pub use crate::api::builders::sample::TimestampBuilderTrait; - pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] pub use crate::api::sample::Locality; - pub use crate::api::sample::Sample; - pub use crate::api::sample::SampleFields; - pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; - pub use crate::api::sample::SourceSn; + pub use crate::api::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilder, SampleBuilderAny, SampleBuilderDelete, + SampleBuilderPut, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }, + sample::{Sample, SampleFields, SampleKind, SourceSn}, + }; } /// Value primitives @@ -253,42 +238,32 @@ pub mod encoding { /// Payload primitives pub mod bytes { - pub use crate::api::bytes::Deserialize; - pub use crate::api::bytes::OptionZBytes; - pub use crate::api::bytes::Serialize; - pub use crate::api::bytes::StringOrBase64; - pub use crate::api::bytes::ZBytes; - pub use crate::api::bytes::ZBytesIterator; - pub use crate::api::bytes::ZBytesReader; - pub use crate::api::bytes::ZBytesWriter; - pub use crate::api::bytes::ZDeserializeError; - pub use crate::api::bytes::ZSerde; + pub use crate::api::bytes::{ + Deserialize, OptionZBytes, Serialize, StringOrBase64, ZBytes, ZBytesIterator, ZBytesReader, + ZBytesWriter, ZDeserializeError, ZSerde, + }; } /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - pub use crate::api::selector::Parameters; - pub use 
crate::api::selector::Selector; - pub use crate::api::selector::TIME_RANGE_KEY; pub use zenoh_protocol::core::Properties; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + + pub use crate::api::selector::{Parameters, Selector, TIME_RANGE_KEY}; } /// Subscribing primitives pub mod subscriber { - pub use crate::api::subscriber::FlumeSubscriber; - pub use crate::api::subscriber::Subscriber; - pub use crate::api::subscriber::SubscriberBuilder; /// The kind of reliability. pub use zenoh_protocol::core::Reliability; + + pub use crate::api::subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder}; } /// Publishing primitives pub mod publication { - pub use crate::api::builders::publication::PublicationBuilderDelete; - pub use crate::api::builders::publication::PublicationBuilderPut; - pub use crate::api::builders::publication::PublisherBuilder; - pub use crate::api::builders::publication::PublisherDeleteBuilder; + pub use zenoh_protocol::core::CongestionControl; + #[zenoh_macros::unstable] pub use crate::api::publication::MatchingListener; #[zenoh_macros::unstable] @@ -297,88 +272,81 @@ pub mod publication { pub use crate::api::publication::MatchingListenerUndeclaration; #[zenoh_macros::unstable] pub use crate::api::publication::MatchingStatus; - pub use crate::api::publication::Priority; - pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherRef; - pub use crate::api::publication::PublisherUndeclaration; - pub use zenoh_protocol::core::CongestionControl; + pub use crate::api::{ + builders::publication::{ + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + PublisherDeleteBuilder, + }, + publication::{Priority, Publisher, PublisherUndeclaration}, + }; } /// Query primitives pub mod query { - pub use crate::api::query::GetBuilder; - pub use crate::api::query::Reply; #[zenoh_macros::unstable] pub 
use crate::api::query::ReplyKeyExpr; #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; - pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::query::{ + ConsolidationMode, GetBuilder, QueryConsolidation, QueryTarget, Reply, + }; } /// Queryable primitives pub mod queryable { - pub use crate::api::queryable::Query; - pub use crate::api::queryable::Queryable; - pub use crate::api::queryable::QueryableBuilder; - pub use crate::api::queryable::QueryableUndeclaration; - pub use crate::api::queryable::ReplyBuilder; - pub use crate::api::queryable::ReplyBuilderDelete; - pub use crate::api::queryable::ReplyBuilderPut; - pub use crate::api::queryable::ReplyErrBuilder; #[zenoh_macros::unstable] pub use crate::api::queryable::ReplySample; + pub use crate::api::queryable::{ + Query, Queryable, QueryableBuilder, QueryableUndeclaration, ReplyBuilder, + ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder, + }; } /// Callback handler trait pub mod handlers { - pub use crate::api::handlers::locked; - pub use crate::api::handlers::Callback; - pub use crate::api::handlers::CallbackDrop; - pub use crate::api::handlers::DefaultHandler; - pub use crate::api::handlers::FifoChannel; - pub use crate::api::handlers::IntoHandler; - pub use crate::api::handlers::RingChannel; - pub use crate::api::handlers::RingChannelHandler; + pub use crate::api::handlers::{ + locked, Callback, CallbackDrop, DefaultHandler, FifoChannel, IntoHandler, RingChannel, + RingChannelHandler, + }; } /// Scouting primitives pub mod scouting { - pub use crate::api::scouting::scout; - pub use crate::api::scouting::Scout; - pub use crate::api::scouting::ScoutBuilder; /// Constants and helpers for zenoh `whatami` flags. pub use zenoh_protocol::core::WhatAmI; /// A zenoh Hello message. 
pub use zenoh_protocol::scouting::Hello; + + pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; } /// Liveliness primitives #[cfg(feature = "unstable")] pub mod liveliness { - pub use crate::api::liveliness::Liveliness; - pub use crate::api::liveliness::LivelinessGetBuilder; - pub use crate::api::liveliness::LivelinessSubscriberBuilder; - pub use crate::api::liveliness::LivelinessToken; - pub use crate::api::liveliness::LivelinessTokenBuilder; - pub use crate::api::liveliness::LivelinessTokenUndeclaration; + pub use crate::api::liveliness::{ + Liveliness, LivelinessGetBuilder, LivelinessSubscriberBuilder, LivelinessToken, + LivelinessTokenBuilder, LivelinessTokenUndeclaration, + }; } /// Timestamp support pub mod time { - pub use crate::api::time::new_reception_timestamp; pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; + + pub use crate::api::time::new_reception_timestamp; } /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. 
#[doc(hidden)] pub mod runtime { - pub use crate::net::runtime::RuntimeBuilder; - pub use crate::net::runtime::{AdminSpace, Runtime}; pub use zenoh_runtime::ZRuntime; + + pub use crate::net::runtime::{AdminSpace, Runtime, RuntimeBuilder}; } /// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants @@ -393,42 +361,39 @@ pub mod config { #[doc(hidden)] #[cfg(all(feature = "unstable", feature = "plugins"))] pub mod plugins { - pub use crate::api::plugins::PluginsManager; - pub use crate::api::plugins::Response; - pub use crate::api::plugins::RunningPlugin; - pub use crate::api::plugins::PLUGIN_PREFIX; - pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; + pub use crate::api::plugins::{ + PluginsManager, Response, RunningPlugin, RunningPluginTrait, ZenohPlugin, PLUGIN_PREFIX, + }; } #[doc(hidden)] pub mod internal { - pub use zenoh_core::zasync_executor_init; - pub use zenoh_core::zerror; - pub use zenoh_core::zlock; - pub use zenoh_core::ztimeout; + pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout}; pub use zenoh_result::bail; pub use zenoh_sync::Condition; - pub use zenoh_task::TaskController; - pub use zenoh_task::TerminatableTask; - pub use zenoh_util::core::ResolveFuture; - pub use zenoh_util::LibLoader; - pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; + pub use zenoh_task::{TaskController, TerminatableTask}; + pub use zenoh_util::{ + core::ResolveFuture, zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR, + }; } #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; - pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; - pub use zenoh_shm::api::provider::shared_memory_provider::{Deallocate, Defragment}; - pub use zenoh_shm::api::provider::types::AllocAlignment; - pub use 
zenoh_shm::api::provider::types::MemoryLayout; - pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; - pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ + client_storage::SharedMemoryClientStorage, protocol_implementations::posix::{ posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + provider::{ + shared_memory_provider::{ + BlockOn, Deallocate, Defragment, GarbageCollect, SharedMemoryProviderBuilder, + }, + types::{AllocAlignment, MemoryLayout}, + }, + slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + }, }; } diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs index 4954062a3d..a66163728c 100644 --- a/zenoh/src/net/codec/linkstate.rs +++ b/zenoh/src/net/codec/linkstate.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::Zenoh080Routing; -use crate::net::protocol::{ - linkstate, - linkstate::{LinkState, LinkStateList}, -}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +23,12 @@ use zenoh_protocol::{ core::{Locator, WhatAmI, ZenohId}, }; +use super::Zenoh080Routing; +use crate::net::protocol::{ + linkstate, + linkstate::{LinkState, LinkStateList}, +}; + // LinkState impl WCodec<&LinkState, &mut W> for Zenoh080Routing where diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index e58e01a1b5..b400d1a254 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -11,18 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{any::Any, sync::Arc}; + +use zenoh_link::Link; +use zenoh_protocol::network::{NetworkBody, NetworkMessage}; +use zenoh_result::ZResult; +use zenoh_transport::{unicast::TransportUnicast, TransportPeerEventHandler}; + use 
super::Primitives; use crate::net::routing::{ dispatcher::face::Face, interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; -use std::{any::Any, sync::Arc}; -use zenoh_link::Link; -use zenoh_protocol::network::{NetworkBody, NetworkMessage}; -use zenoh_result::ZResult; -use zenoh_transport::unicast::TransportUnicast; -use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { face: Face, diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 8589fab518..df292b4315 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -11,19 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{EPrimitives, Primitives}; -use crate::net::routing::{ - dispatcher::face::{Face, WeakFace}, - interceptor::{InterceptorTrait, InterceptorsChain}, - RoutingContext, -}; use std::sync::OnceLock; + use zenoh_protocol::network::{ interest::Interest, Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use super::{EPrimitives, Primitives}; +use crate::net::routing::{ + dispatcher::face::{Face, WeakFace}, + interceptor::{InterceptorTrait, InterceptorsChain}, + RoutingContext, +}; + pub struct Mux { pub handler: TransportUnicast, pub(crate) face: OnceLock, diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 06d55de920..c5129f76e2 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -11,21 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::router::*; -use super::tables::TablesLock; -use super::{resource::*, tables}; -use crate::api::key_expr::KeyExpr; -use crate::net::primitives::{McastMux, Mux, Primitives}; -use crate::net::routing::interceptor::{InterceptorTrait, InterceptorsChain}; -use std::any::Any; -use std::collections::HashMap; -use std::fmt; -use 
std::sync::{Arc, Weak}; +use std::{ + any::Any, + collections::HashMap, + fmt, + sync::{Arc, Weak}, +}; + use tokio_util::sync::CancellationToken; -use zenoh_protocol::zenoh::RequestBody; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohId}, network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, + zenoh::RequestBody, }; use zenoh_sync::get_mut_unchecked; use zenoh_task::TaskController; @@ -33,6 +30,15 @@ use zenoh_transport::multicast::TransportMulticast; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; +use super::{super::router::*, resource::*, tables, tables::TablesLock}; +use crate::{ + api::key_expr::KeyExpr, + net::{ + primitives::{McastMux, Mux, Primitives}, + routing::interceptor::{InterceptorTrait, InterceptorsChain}, + }, +}; + pub struct FaceState { pub(crate) id: usize, pub(crate) zid: ZenohId, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index fe2274ed64..94c6f7b1a6 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -11,23 +11,26 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -use super::resource::{DataRoutes, Direction, Resource}; -use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::HatTrait; -use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; + use zenoh_core::zread; -use zenoh_protocol::core::key_expr::keyexpr; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, - network::{declare::ext, Push}, + core::{key_expr::keyexpr, WhatAmI, WireExpr}, + network::{ + declare::{ext, subscriber::ext::SubscriberInfo, SubscriberId}, + Push, + }, zenoh::PushBody, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face::FaceState, + resource::{DataRoutes, Direction, 
Resource}, + tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}, +}; +use crate::net::routing::hat::HatTrait; + pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index cd17f1339f..2bbc924e0b 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -11,16 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; -use super::tables::NodeId; -use super::tables::{RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::HatTrait; -use crate::net::routing::RoutingContext; +use std::{ + collections::HashMap, + sync::{Arc, Weak}, + time::Duration, +}; + use async_trait::async_trait; -use std::collections::HashMap; -use std::sync::{Arc, Weak}; -use std::time::Duration; use tokio_util::sync::CancellationToken; use zenoh_config::WhatAmI; use zenoh_protocol::{ @@ -35,6 +32,13 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; +use super::{ + face::FaceState, + resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}, + tables::{NodeId, RoutingExpr, Tables, TablesLock}, +}; +use crate::net::routing::{hat::HatTrait, RoutingContext}; + pub(crate) struct Query { src_face: Arc, src_qid: RequestId, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index bc0aecb9bb..d8765e16ae 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -11,17 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -use super::tables::{Tables, TablesLock}; -use crate::net::routing::dispatcher::face::Face; -use crate::net::routing::RoutingContext; -use std::any::Any; -use std::collections::HashMap; -use 
std::convert::TryInto; -use std::hash::{Hash, Hasher}; -use std::sync::{Arc, Weak}; +use std::{ + any::Any, + collections::HashMap, + convert::TryInto, + hash::{Hash, Hasher}, + sync::{Arc, Weak}, +}; + use zenoh_config::WhatAmI; -use zenoh_protocol::network::RequestId; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ @@ -29,11 +27,17 @@ use zenoh_protocol::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, }, - Mapping, + Mapping, RequestId, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face::FaceState, + tables::{Tables, TablesLock}, +}; +use crate::net::routing::{dispatcher::face::Face, RoutingContext}; + pub(crate) type NodeId = u16; pub(crate) type Direction = (Arc, WireExpr<'static>, NodeId); diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 72cee0b452..2853cc5a9f 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -11,27 +11,30 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -pub use super::pubsub::*; -pub use super::queries::*; -pub use super::resource::*; -use crate::net::routing::hat; -use crate::net::routing::hat::HatTrait; -use crate::net::routing::interceptor::interceptor_factories; -use crate::net::routing::interceptor::InterceptorFactory; -use std::any::Any; -use std::collections::HashMap; -use std::sync::{Arc, Weak}; -use std::sync::{Mutex, RwLock}; -use std::time::Duration; +use std::{ + any::Any, + collections::HashMap, + sync::{Arc, Mutex, RwLock, Weak}, + time::Duration, +}; + use uhlc::HLC; -use zenoh_config::unwrap_or_default; -use zenoh_config::Config; -use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; -use zenoh_protocol::network::Mapping; +use zenoh_config::{unwrap_or_default, Config}; +use zenoh_protocol::{ + core::{ExprId, WhatAmI, ZenohId}, + network::Mapping, +}; use zenoh_result::ZResult; 
use zenoh_sync::get_mut_unchecked; +use super::face::FaceState; +pub use super::{pubsub::*, queries::*, resource::*}; +use crate::net::routing::{ + hat, + hat::HatTrait, + interceptor::{interceptor_factories, InterceptorFactory}, +}; + pub(crate) struct RoutingExpr<'a> { pub(crate) prefix: &'a Arc, pub(crate) suffix: &'a str, diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 6ca0af1e17..3b4e7c7103 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -17,14 +17,21 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{ - net::routing::{ - dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, - net::runtime::Runtime, +use std::{ + any::Any, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, }; +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + Oam, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::unicast::TransportUnicast; + use self::{ pubsub::{pubsub_new_face, undeclare_client_subscription}, queries::{queries_new_face, undeclare_client_queryable}, @@ -36,19 +43,13 @@ use super::{ }, HatBaseTrait, HatTrait, }; -use std::{ - any::Any, - collections::HashMap, - sync::{atomic::AtomicU32, Arc}, -}; -use zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::{ - queryable::ext::QueryableInfoType, QueryableId, SubscriberId, +use crate::net::{ + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, }; -use zenoh_protocol::network::Oam; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::unicast::TransportUnicast; mod pubsub; mod queries; diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs 
b/zenoh/src/net/routing/hat/client/pubsub.rs index dd35cf24c8..3334fbfb14 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,30 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use zenoh_protocol::{ - core::{Reliability, WhatAmI}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn propagate_simple_subscription_to( _tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 777198ed95..c915d788a9 100644 --- 
a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,33 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: 
&QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index beb2d6ef68..e76f53a0dd 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -17,36 +17,13 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use self::{ - network::Network, - pubsub::{pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, -}; -use super::{ - super::dispatcher::{ - face::FaceState, - tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, - }, - HatBaseTrait, HatTrait, -}; -use crate::{ - net::runtime::Runtime, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - hat::TREES_COMPUTATION_DELAY_MS, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, - }, -}; use std::{ any::Any, collections::{HashMap, HashSet}, sync::{atomic::AtomicU32, Arc}, time::Duration, }; + use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, @@ -61,6 +38,29 @@ use zenoh_sync::get_mut_unchecked; use zenoh_task::TerminatableTask; use zenoh_transport::unicast::TransportUnicast; +use self::{ + network::Network, + pubsub::{pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, +}; +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, + }, + HatBaseTrait, HatTrait, +}; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + hat::TREES_COMPUTATION_DELAY_MS, + 
router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, +}; + mod network; mod pubsub; mod queries; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 9c8e0c8860..2a26b1f583 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -11,26 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use crate::net::runtime::WeakRuntime; -use petgraph::graph::NodeIndex; -use petgraph::visit::{VisitMap, Visitable}; -use rand::Rng; use std::convert::TryInto; + +use petgraph::{ + graph::NodeIndex, + visit::{VisitMap, Visitable}, +}; +use rand::Rng; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohId}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + routing::dispatcher::tables::NodeId, + runtime::{Runtime, WeakRuntime}, +}; + #[derive(Clone)] struct Details { zid: bool, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 2c1cbb23e7..e5f7da81f7 100644 --- 
a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -11,33 +11,38 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + sync::{atomic::Ordering, Arc}, +}; + use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ - core::{Reliability, WhatAmI, ZenohId}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, 
PREFIX_LIVELINESS, +}; + #[inline] fn send_sourced_subscription_to_net_childs( tables: &Tables, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index a227d845ba..bed683f717 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -11,36 +11,46 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr, ZenohId}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, ZenohId, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use 
zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + queries::*, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index ee6557aac3..5eb812df71 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,15 +17,8 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use super::{ - dispatcher::{ - face::{Face, FaceState}, - tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, - }, - router::RoutesIndexes, -}; -use crate::net::runtime::Runtime; use std::{any::Any, sync::Arc}; + use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; use zenoh_protocol::{ @@ -41,6 +34,15 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, + }, + router::RoutesIndexes, +}; +use crate::net::runtime::Runtime; + mod client; mod linkstate_peer; mod p2p_peer; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index df04b396ab..57b76fc086 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -11,24 +11,30 
@@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::runtime::Runtime; -use crate::net::runtime::WeakRuntime; +use std::convert::TryInto; + use petgraph::graph::NodeIndex; use rand::Rng; -use std::convert::TryInto; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohId}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + runtime::{Runtime, WeakRuntime}, +}; + #[derive(Clone)] struct Details { zid: bool, diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index ba41e0f114..530c181335 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -17,17 +17,24 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{ - net::runtime::Runtime, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, +use std::{ + any::Any, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, +}; + +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; +use zenoh_protocol::{ + common::ZExtBody, + network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + oam::id::OAM_LINKSTATE, + Oam, }, }; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::unicast::TransportUnicast; use self::{ gossip::Network, @@ -41,23 +48,15 @@ use super::{ }, HatBaseTrait, HatTrait, }; -use std::{ - any::Any, - collections::HashMap, - sync::{atomic::AtomicU32, Arc}, -}; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; -use zenoh_protocol::network::{ - declare::{QueryableId, SubscriberId}, - Oam, -}; -use zenoh_protocol::{ - common::ZExtBody, - network::{declare::queryable::ext::QueryableInfoType, oam::id::OAM_LINKSTATE}, +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, }; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::unicast::TransportUnicast; mod gossip; mod pubsub; diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index d57c2ac665..e7cf0c5e5d 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -11,30 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use 
crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use zenoh_protocol::{ - core::{Reliability, WhatAmI}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn propagate_simple_subscription_to( _tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 25fed11842..f0de12d7b9 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -11,33 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use 
crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 85ce0e6916..f573acee43 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -17,34 
+17,6 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use self::{ - network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, - }, -}; -use super::{ - super::dispatcher::{ - face::FaceState, - tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, - }, - HatBaseTrait, HatTrait, -}; -use crate::{ - net::runtime::Runtime, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - hat::TREES_COMPUTATION_DELAY_MS, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, - }, -}; use std::{ any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, @@ -52,6 +24,7 @@ use std::{ sync::{atomic::AtomicU32, Arc}, time::Duration, }; + use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, @@ -66,6 +39,33 @@ use zenoh_sync::get_mut_unchecked; use zenoh_task::TerminatableTask; use zenoh_transport::unicast::TransportUnicast; +use self::{ + network::{shared_nodes, Network}, + pubsub::{ + pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, + }, + queries::{ + queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, + }, +}; +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, + }, + HatBaseTrait, HatTrait, +}; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + hat::TREES_COMPUTATION_DELAY_MS, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, +}; + mod network; mod pubsub; mod queries; diff --git 
a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 3ff59b5ede..ae435a6871 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -11,25 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use petgraph::graph::NodeIndex; -use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; -use rand::Rng; use std::convert::TryInto; + +use petgraph::{ + graph::NodeIndex, + visit::{IntoNodeReferences, VisitMap, Visitable}, +}; +use rand::Rng; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohId}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + routing::dispatcher::tables::NodeId, + runtime::Runtime, +}; + #[derive(Clone)] struct Details { zid: bool, diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 99b7eb3c12..14726ac970 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -11,33 +11,38 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, 
hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + sync::{atomic::Ordering, Arc}, +}; + use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ - core::{Reliability, WhatAmI, ZenohId}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, + network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn send_sourced_subscription_to_net_childs( tables: &Tables, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index dbd7da8629..9defb80081 100644 --- 
a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -11,36 +11,46 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr, ZenohId}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, ZenohId, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, + network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + 
face::FaceState, + queries::*, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 102e30a0df..5f579bf409 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -18,14 +18,8 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use super::{ - authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, - InterceptorFactoryTrait, InterceptorTrait, -}; -use crate::api::key_expr::KeyExpr; -use crate::net::routing::RoutingContext; -use std::any::Any; -use std::sync::Arc; +use std::{any::Any, sync::Arc}; + use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; use zenoh_protocol::{ network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, @@ -33,6 +27,12 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + +use super::{ + authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, + InterceptorFactoryTrait, InterceptorTrait, +}; +use crate::{api::key_expr::KeyExpr, net::routing::RoutingContext}; pub struct AclEnforcer { enforcer: Arc, } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 61c1cba217..f1cdb1ca4e 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -17,13 +17,16 @@ //! 
This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use ahash::RandomState; use std::collections::HashMap; + +use ahash::RandomState; use zenoh_config::{ AclConfig, AclConfigRules, Action, InterceptorFlow, Permission, PolicyRule, Subject, }; -use zenoh_keyexpr::keyexpr; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; +use zenoh_keyexpr::{ + keyexpr, + keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}, +}; use zenoh_result::ZResult; type PolicyForSubject = FlowPolicy; diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index cda132e806..06e86ec3ce 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -18,17 +18,21 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use crate::net::routing::interceptor::*; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}; use zenoh_core::zlock; -use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; -use zenoh_keyexpr::keyexpr_tree::{support::UnknownWildness, KeBoxTree}; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; +use zenoh_keyexpr::keyexpr_tree::{ + impls::KeyedSetProvider, support::UnknownWildness, IKeyExprTree, IKeyExprTreeMut, KeBoxTree, +}; use zenoh_protocol::network::NetworkBody; use zenoh_result::ZResult; +use crate::net::routing::interceptor::*; + pub(crate) fn downsampling_interceptor_factories( config: &Vec, ) -> ZResult> { diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 6d9391ce15..3be30e9205 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -22,8 +22,6 @@ mod access_control; use 
access_control::acl_interceptor_factories; mod authorization; -use super::RoutingContext; -use crate::api::key_expr::KeyExpr; use std::any::Any; use zenoh_config::Config; @@ -31,6 +29,9 @@ use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use super::RoutingContext; +use crate::api::key_expr::KeyExpr; + pub mod downsampling; use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories; diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 75b4d4ef6a..9601465326 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -24,11 +24,12 @@ pub mod router; use std::{cell::OnceCell, sync::Arc}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; +use zenoh_protocol::{ + core::{key_expr::OwnedKeyExpr, WireExpr}, + network::NetworkMessage, +}; use self::{dispatcher::face::Face, router::Resource}; - use super::runtime; pub(crate) static PREFIX_LIVELINESS: &str = "@/liveliness"; @@ -100,8 +101,7 @@ impl RoutingContext { impl RoutingContext { #[inline] pub(crate) fn wire_expr(&self) -> Option<&WireExpr> { - use zenoh_protocol::network::DeclareBody; - use zenoh_protocol::network::NetworkBody; + use zenoh_protocol::network::{DeclareBody, NetworkBody}; match &self.msg.body { NetworkBody::Push(m) => Some(&m.wire_expr), NetworkBody::Request(m) => Some(&m.wire_expr), diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 87766f021b..630253e1c6 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -11,33 +11,32 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::dispatcher::face::{Face, FaceState}; -pub use super::dispatcher::pubsub::*; -pub use super::dispatcher::queries::*; -pub use super::dispatcher::resource::*; -use super::dispatcher::tables::Tables; -use 
super::dispatcher::tables::TablesLock; -use super::hat; -use super::interceptor::EgressInterceptor; -use super::interceptor::InterceptorsChain; -use super::runtime::Runtime; -use crate::net::primitives::DeMux; -use crate::net::primitives::DummyPrimitives; -use crate::net::primitives::EPrimitives; -use crate::net::primitives::McastMux; -use crate::net::primitives::Mux; -use crate::net::routing::interceptor::IngressInterceptor; -use std::str::FromStr; -use std::sync::Arc; -use std::sync::{Mutex, RwLock}; +use std::{ + str::FromStr, + sync::{Arc, Mutex, RwLock}, +}; + use uhlc::HLC; use zenoh_config::Config; use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_transport::multicast::TransportMulticast; -use zenoh_transport::unicast::TransportUnicast; -use zenoh_transport::TransportPeer; // use zenoh_collections::Timer; use zenoh_result::ZResult; +use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast, TransportPeer}; + +pub use super::dispatcher::{pubsub::*, queries::*, resource::*}; +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{Tables, TablesLock}, + }, + hat, + interceptor::{EgressInterceptor, InterceptorsChain}, + runtime::Runtime, +}; +use crate::net::{ + primitives::{DeMux, DummyPrimitives, EPrimitives, McastMux, Mux}, + routing::interceptor::IngressInterceptor, +}; pub struct Router { // whatami: WhatAmI, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 3f2e0b488f..8b53692ead 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -10,24 +10,13 @@ // // Contributors: // ZettaScale Zenoh Team, -use super::routing::dispatcher::face::Face; -use super::Runtime; -use crate::api::builders::sample::ValueBuilderTrait; -use crate::api::bytes::ZBytes; -use crate::api::key_expr::KeyExpr; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::api::plugins::PluginsManager; -use crate::api::queryable::Query; -use 
crate::api::queryable::QueryInner; -use crate::api::value::Value; -use crate::encoding::Encoding; -use crate::net::primitives::Primitives; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + sync::{Arc, Mutex}, +}; + use serde_json::json; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::sync::Arc; -use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; @@ -36,20 +25,35 @@ use zenoh_core::Wait; use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_protocol::core::key_expr::keyexpr; -use zenoh_protocol::network::declare::QueryableId; -use zenoh_protocol::network::Interest; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID}, network::{ - declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, - ResponseFinal, + declare::{ + queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, + }, + ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Interest, Push, Request, + Response, ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +use super::{routing::dispatcher::face::Face, Runtime}; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::plugins::PluginsManager; +use crate::{ + api::{ + builders::sample::ValueBuilderTrait, + bytes::ZBytes, + key_expr::KeyExpr, + queryable::{Query, QueryInner}, + value::Value, + }, + encoding::Encoding, + net::primitives::Primitives, +}; + pub struct AdminContext { runtime: Runtime, version: String, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index f1cf4d95d2..f4eb0289ca 100644 --- 
a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -20,31 +20,28 @@ mod adminspace; pub mod orchestrator; -use super::primitives::DeMux; -use super::routing; -use super::routing::router::Router; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::api::loader::{load_plugins, start_plugins}; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::api::plugins::PluginsManager; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; -use crate::{GIT_VERSION, LONG_VERSION}; -pub use adminspace::AdminSpace; -use futures::stream::StreamExt; -use futures::Future; -use std::any::Any; -use std::sync::atomic::{AtomicU32, Ordering}; -use std::sync::{Arc, Weak}; #[cfg(all(feature = "unstable", feature = "plugins"))] use std::sync::{Mutex, MutexGuard}; -use std::time::Duration; +use std::{ + any::Any, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, Weak, + }, + time::Duration, +}; + +pub use adminspace::AdminSpace; +use futures::{stream::StreamExt, Future}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; -use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::{ + core::{Locator, WhatAmI, ZenohId}, + network::NetworkMessage, +}; use zenoh_result::{bail, ZResult}; #[cfg(all(feature = "unstable", feature = "shared-memory"))] use zenoh_shm::api::client_storage::SharedMemoryClientStorage; @@ -57,6 +54,16 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use super::{primitives::DeMux, routing, routing::router::Router}; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::loader::{load_plugins, start_plugins}; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::plugins::PluginsManager; +use crate::{ + 
config::{unwrap_or_default, Config, ModeDependent, Notifier}, + GIT_VERSION, LONG_VERSION, +}; + pub(crate) struct RuntimeState { zid: ZenohId, whatami: WhatAmI, diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index c2c7ecedd2..610f189b58 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -11,14 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Runtime, RuntimeSession}; +use std::{ + net::{IpAddr, Ipv6Addr, SocketAddr}, + time::Duration, +}; + use futures::prelude::*; use socket2::{Domain, Socket, Type}; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::time::Duration; use tokio::net::UdpSocket; -use zenoh_buffers::reader::DidntRead; -use zenoh_buffers::{reader::HasReader, writer::HasWriter}; +use zenoh_buffers::{ + reader::{DidntRead, HasReader}, + writer::HasWriter, +}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_config::{ get_global_connect_timeout, get_global_listener_timeout, unwrap_or_default, ModeDependent, @@ -30,6 +34,8 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::{Runtime, RuntimeSession}; + const RCV_BUF_SIZE: usize = u16::MAX as usize; const SCOUT_INITIAL_PERIOD: Duration = Duration::from_millis(1_000); const SCOUT_MAX_PERIOD: Duration = Duration::from_millis(8_000); diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 841bc209f6..5f04b73d53 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -11,23 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::primitives::{DummyPrimitives, EPrimitives, Primitives}; -use crate::net::routing::dispatcher::tables::{self, Tables}; -use crate::net::routing::router::*; -use crate::net::routing::RoutingContext; -use std::convert::{TryFrom, TryInto}; -use std::sync::Arc; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; + use uhlc::HLC; use zenoh_buffers::ZBuf; use 
zenoh_config::Config; use zenoh_core::zlock; -use zenoh_protocol::core::Encoding; -use zenoh_protocol::core::{ - key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, +use zenoh_protocol::{ + core::{ + key_expr::keyexpr, Encoding, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, + }, + network::{ + declare::subscriber::ext::SubscriberInfo, ext, Declare, DeclareBody, DeclareKeyExpr, + }, + zenoh::{PushBody, Put}, +}; + +use crate::net::{ + primitives::{DummyPrimitives, EPrimitives, Primitives}, + routing::{ + dispatcher::tables::{self, Tables}, + router::*, + RoutingContext, + }, }; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; -use zenoh_protocol::zenoh::{PushBody, Put}; #[test] fn base_test() { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index ac466ae50b..54418d9f78 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -26,71 +26,59 @@ // Reexport API in flat namespace pub(crate) mod flat { - pub use crate::buffers::*; - pub use crate::bytes::*; - pub use crate::config::*; - pub use crate::core::{Error as ZError, Resolvable, Resolve, Result as ZResult}; - pub use crate::encoding::*; - pub use crate::handlers::*; - pub use crate::key_expr::*; - pub use crate::publication::*; - pub use crate::query::*; - pub use crate::queryable::*; - pub use crate::sample::*; - pub use crate::scouting::*; - pub use crate::selector::*; - pub use crate::session::*; #[cfg(feature = "shared-memory")] pub use crate::shm::*; - pub use crate::subscriber::*; - pub use crate::time::*; - pub use crate::value::*; + pub use crate::{ + buffers::*, + bytes::*, + config::*, + core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, + encoding::*, + handlers::*, + key_expr::*, + publication::*, + query::*, + queryable::*, + sample::*, + scouting::*, + selector::*, + session::*, + subscriber::*, + time::*, + value::*, + }; } 
// Reexport API in hierarchical namespace pub(crate) mod mods { - pub use crate::buffers; - pub use crate::bytes; - pub use crate::config; - pub use crate::core; - pub use crate::encoding; - pub use crate::handlers; - pub use crate::key_expr; - pub use crate::publication; - pub use crate::query; - pub use crate::queryable; - pub use crate::sample; - pub use crate::scouting; - pub use crate::selector; - pub use crate::session; #[cfg(feature = "shared-memory")] pub use crate::shm; - pub use crate::subscriber; - pub use crate::time; - pub use crate::value; + pub use crate::{ + buffers, bytes, config, core, encoding, handlers, key_expr, publication, query, queryable, + sample, scouting, selector, session, subscriber, time, value, + }; } +pub use flat::*; +pub use mods::*; + #[allow(deprecated)] pub use crate::core::AsyncResolve; #[allow(deprecated)] pub use crate::core::SyncResolve; pub use crate::core::Wait; -pub use flat::*; -pub use mods::*; /// Prelude to import when using Zenoh's sync API. #[deprecated = "use `zenoh::prelude` instead"] pub mod sync { - pub use super::flat::*; - pub use super::mods::*; + pub use super::{flat::*, mods::*}; #[allow(deprecated)] pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. 
#[deprecated = "use `zenoh::prelude` instead"] pub mod r#async { - pub use super::flat::*; - pub use super::mods::*; + pub use super::{flat::*, mods::*}; #[allow(deprecated)] pub use crate::core::AsyncResolve; } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 5f3c482581..1889a9f9fa 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -13,8 +13,11 @@ // #![cfg(target_family = "unix")] mod test { - use std::sync::{Arc, Mutex}; - use std::time::Duration; + use std::{ + sync::{Arc, Mutex}, + time::Duration, + }; + use tokio::runtime::Handle; use zenoh::prelude::*; use zenoh_core::{zlock, ztimeout}; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 836845a645..a63137ccfc 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,8 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn attachment_pubsub() { - use zenoh::bytes::ZBytes; - use zenoh::prelude::*; + use zenoh::{bytes::ZBytes, prelude::*}; let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 99ca6055da..41a681dc8f 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; + +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 0862f9ee89..55f9368a87 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{thread, time::Duration}; + use zenoh::prelude::*; #[test] diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 37f193630d..4113aa462d 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; -use 
zenoh::internal::zlock; -use zenoh::prelude::*; + +use zenoh::{internal::zlock, prelude::*}; #[cfg(target_os = "windows")] static MINIMAL_SLEEP_INTERVAL_MS: u64 = 17; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0456361419..b974b5d705 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -14,7 +14,9 @@ #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use {std::time::Duration, zenoh::internal::ztimeout, zenoh::prelude::*}; + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 6f44b2d0be..3a75cc9f37 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; + +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 3c9f2723a6..fac785d7c0 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -11,14 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::str::FromStr; -use std::sync::atomic::Ordering; -use std::sync::{atomic::AtomicUsize, Arc}; -use std::time::Duration; +use std::{ + str::FromStr, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::core::Result; -use zenoh::internal::{bail, ztimeout}; -use zenoh::prelude::*; +use zenoh::{ + core::Result, + internal::{bail, ztimeout}, + prelude::*, +}; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 43dfc79470..4d0205f5be 100644 
--- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -11,13 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + #[cfg(feature = "unstable")] use zenoh::runtime::{Runtime, RuntimeBuilder}; +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index ec77890c1e..14f6985414 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -13,11 +13,15 @@ // #[cfg(all(feature = "unstable", feature = "shared-memory"))] mod tests { - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::Duration; - use zenoh::internal::ztimeout; - use zenoh::prelude::*; + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + + use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index c5be555a00..b62a842b28 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,12 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio::runtime::Handle; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index cabee33333..229352e5db 100644 --- a/zenohd/src/main.rs +++ 
b/zenohd/src/main.rs @@ -14,16 +14,14 @@ use clap::Parser; use futures::future; use git_version::git_version; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::EnvFilter; -use zenoh::config::EndPoint; -use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; -use zenoh::core::Result; -use zenoh::scouting::WhatAmI; - +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; #[cfg(feature = "loki")] use url::Url; +use zenoh::{ + config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap}, + core::Result, + scouting::WhatAmI, +}; #[cfg(feature = "loki")] const LOKI_ENDPOINT_VAR: &str = "LOKI_ENDPOINT"; From 5a841eef5dcfb8dc62d3ae9b96bafb6ab1661858 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 29 Apr 2024 17:29:02 +0200 Subject: [PATCH 317/598] chore: add pre-commit hook config --- .pre-commit-config.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..86dc1703ed --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2024 ZettaScale Technology +# +# This program and the accompanying materials are made available under the +# terms of the Eclipse Public License 2.0 which is available at +# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +# which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+# +# SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +# +# Contributors: +# ZettaScale Zenoh Team, +# +repos: + - repo: local + hooks: + - id: fmt + name: fmt + entry: cargo fmt -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" + language: system + types: [rust] From 42bee876f968a144016e71bfb9fbce02069e88b3 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 2 May 2024 10:44:59 +0200 Subject: [PATCH 318/598] fix: fix missing formatting --- zenoh/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index bb2bccb869..58e17fc2ea 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -205,10 +205,9 @@ pub mod session { /// Tools to access information about the current zenoh [`Session`](crate::Session). pub mod info { - pub use crate::api::info::PeersZenohIdBuilder; - pub use crate::api::info::RoutersZenohIdBuilder; - pub use crate::api::info::SessionInfo; - pub use crate::api::info::ZenohIdBuilder; + pub use crate::api::info::{ + PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder, + }; } /// Sample primitives From c4dfd101701527e9e1441f59f867aa49e529cc6e Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 2 May 2024 12:06:34 +0300 Subject: [PATCH 319/598] Polish SHM examples --- examples/Cargo.toml | 5 +++ examples/examples/z_alloc_shm.rs | 32 +++++------------ examples/examples/z_bytes_shm.rs | 17 ++++++--- examples/examples/z_get_shm.rs | 44 ++++++++--------------- examples/examples/z_ping_shm.rs | 37 +++++++------------ examples/examples/z_posix_shm_provider.rs | 44 +++++++++++++++++++++++ examples/examples/z_pub_shm.rs | 43 +++++++--------------- examples/examples/z_pub_shm_thr.rs | 37 +++++++------------ examples/examples/z_queryable_shm.rs | 44 ++++++++--------------- zenoh/src/api/bytes.rs | 2 +- 10 files changed, 140 insertions(+), 165 deletions(-) create mode 100644 examples/examples/z_posix_shm_provider.rs diff --git 
a/examples/Cargo.toml b/examples/Cargo.toml index 263653028a..90281ae558 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -172,3 +172,8 @@ required-features = ["unstable", "shared-memory"] name = "z_bytes_shm" path = "examples/z_bytes_shm.rs" required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_posix_shm_provider" +path = "examples/z_posix_shm_provider.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index acff39379c..a01de8d2fa 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -21,29 +21,15 @@ async fn main() { } async fn run() -> ZResult<()> { - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(65536, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(65536) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index d9ab4e1f82..5c582e56e6 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -21,6 +21,7 @@ use zenoh::{ fn main() { // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs let backend = PosixSharedMemoryProviderBackend::builder() .with_size(4096) .unwrap() @@ -32,11 +33,17 @@ fn main() { .backend(backend) .res(); - // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); - - // allocate an SHM buffer (ZShmMut) - let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut owned_shm_buf_mut = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 2773348fd0..7466f6eabc 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -33,43 +33,29 @@ async fn main() { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
- // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider + let mut sbuf = provider .alloc_layout() .size(1024) .res() - .unwrap(); - - let mut sbuf = layout + .unwrap() .alloc() .with_policy::>() .res_async() diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 372967f6e8..f19c4274a4 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -44,34 +44,23 @@ fn main() { let mut samples = Vec::with_capacity(n); - // Construct an SHM 
backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut buf = provider .alloc_layout() .size(size) .res() diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs new file mode 100644 index 0000000000..cdf502bc61 --- /dev/null +++ b/examples/examples/z_posix_shm_provider.rs @@ -0,0 +1,44 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// 
http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::*; + +fn main() { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + + // Total amount of shared memory to allocate + let size = 4096; + + // An alignment for POSIX SHM provider + // Due to internal optimization, all allocations will be aligned corresponding to this alignment, + // so the provider will be able to satisfy allocation layouts with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // A layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + // Build a provider backend + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let _shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); +} diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 356737c3cd..d2a87a59cc 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -32,48 +32,31 @@ async fn main() -> Result<(), ZError> { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. 
This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); let publisher = session.declare_publisher(&path).await.unwrap(); + // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider - .alloc_layout() - .size(1024) - .res() - .unwrap(); + let layout = provider.alloc_layout().size(1024).res().unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(std::time::Duration::from_secs(1)).await; + // Allocate particular SHM buffer using pre-created layout let mut sbuf = layout .alloc() .with_policy::>() diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 0b94304321..0d44fbe6ee 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -28,34 +28,23 @@ async fn 
main() { let z = zenoh::open(config).await.unwrap(); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(sm_size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(sm_size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let mut buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut buf = provider .alloc_layout() .size(size) .res() diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 49939dcb0a..ed2320d2c5 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -32,31 +32,16 @@ async fn main() { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. 
This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -83,14 +68,15 @@ async fn main() { } println!(")"); + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider + let mut sbuf = provider .alloc_layout() .size(1024) .res() - .unwrap(); - - let mut sbuf = layout + .unwrap() .alloc() .with_policy::>() .res_async() diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 874f37ba8c..8a53d5ba34 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1833,12 +1833,12 @@ mod tests { #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ + buffer::zshm::{zshm, ZShm}, protocol_implementations::posix::{ 
posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - buffer::zshm::{zshm, ZShm}, }; const NUM: usize = 1_000; From 446fa2fc28770f92cb44d456dc9638faf4263761 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 2 May 2024 12:15:20 +0300 Subject: [PATCH 320/598] fix lints --- zenoh/tests/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index f8eb11bf63..6de12ab63f 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -15,7 +15,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_bytes_single_buf() { - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; // create an SHM backend... let backend = PosixSharedMemoryProviderBackend::builder() From 74b444efa1a636ed7f2c7099a53914a6f7da98ee Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 2 May 2024 12:59:35 +0300 Subject: [PATCH 321/598] fix lint --- examples/examples/z_ping_shm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index f19c4274a4..354f11d789 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -60,7 +60,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut buf = provider + let buf = provider .alloc_layout() .size(size) .res() From b27a289931dfb317ee5e6696d23cd16251d9b61e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 2 May 2024 16:39:57 +0200 Subject: [PATCH 322/598] Pre-commit fmt --- ci/nostd-check/src/bin/nostd_check.rs | 1 + ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs | 4 ++-- .../src/queryable_get/bin/z_queryable_get.rs | 7 +++---- commons/zenoh-shm/src/header/storage.rs | 2 +- commons/zenoh-shm/src/header/subscription.rs 
| 2 +- commons/zenoh-shm/src/posix_shm/segment.rs | 2 +- commons/zenoh-shm/src/watchdog/confirmator.rs | 4 ++-- commons/zenoh-shm/src/watchdog/periodic_task.rs | 9 +++++---- commons/zenoh-shm/src/watchdog/storage.rs | 2 +- .../src/unicast/universal/reliability.rs | 11 +++++------ 10 files changed, 22 insertions(+), 22 deletions(-) diff --git a/ci/nostd-check/src/bin/nostd_check.rs b/ci/nostd-check/src/bin/nostd_check.rs index 74f85ae06c..b243c9d182 100644 --- a/ci/nostd-check/src/bin/nostd_check.rs +++ b/ci/nostd-check/src/bin/nostd_check.rs @@ -15,6 +15,7 @@ #![no_std] use core::panic::PanicInfo; + use getrandom::{register_custom_getrandom, Error}; use linked_list_allocator::LockedHeap; #[allow(unused_imports)] diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index 2091f833a1..7b1b017c7d 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::*; + +use zenoh::{config::Config, prelude::*}; #[tokio::main] async fn main() { diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 43cb038f94..69335d674e 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -11,10 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::*; +use std::{convert::TryFrom, time::Duration}; + +use zenoh::{config::Config, prelude::*}; #[tokio::main] async fn main() { diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs index c09fa83dba..36e004511a 100644 --- a/commons/zenoh-shm/src/header/storage.rs +++ b/commons/zenoh-shm/src/header/storage.rs @@ -11,12 
+11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::LinkedList, sync::{Arc, Mutex}, }; +use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{ diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs index 49ad170aea..5efe54a7f7 100644 --- a/commons/zenoh-shm/src/header/subscription.rs +++ b/commons/zenoh-shm/src/header/subscription.rs @@ -11,12 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::BTreeMap, sync::{Arc, Mutex}, }; +use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{ diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs index ab79d0fcc1..5458ab3e3e 100644 --- a/commons/zenoh-shm/src/posix_shm/segment.rs +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -117,7 +117,7 @@ where unsafe { *(self.shmem.as_ptr() as *mut usize) } } - // TODO: dead code warning occurs because of `tested_crate_module!()` macro when feature `test` is not enabled. Better to fix that + // TODO: dead code warning occurs because of `tested_crate_module!()` macro when feature `test` is not enabled. 
Better to fix that #[allow(dead_code)] pub fn is_empty(&self) -> bool { unsafe { *(self.shmem.as_ptr() as *mut usize) == 0 } diff --git a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs index 54c2d233dc..b84a76dc50 100644 --- a/commons/zenoh-shm/src/watchdog/confirmator.rs +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -22,8 +22,8 @@ use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{ - periodic_task::PeriodicTask, descriptor::{Descriptor, OwnedDescriptor, SegmentID}, + periodic_task::PeriodicTask, segment::Segment, }; @@ -117,7 +117,7 @@ pub struct WatchdogConfirmator { impl WatchdogConfirmator { fn new(interval: Duration) -> Self { let segment_transactions = Arc::>>::default(); - + let c_segment_transactions = segment_transactions.clone(); let mut segments: Vec<(Arc, BTreeMap)> = vec![]; let task = PeriodicTask::new("Watchdog Confirmator".to_owned(), interval, move || { diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs index 98cf8fbba7..08a6ee18d3 100644 --- a/commons/zenoh-shm/src/watchdog/periodic_task.rs +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -23,7 +23,8 @@ use std::{ use thread_priority::ThreadBuilder; #[cfg(unix)] use thread_priority::{ - set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, ThreadSchedulePolicy::Realtime + set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, + ThreadSchedulePolicy::Realtime, }; pub struct PeriodicTask { @@ -44,7 +45,7 @@ impl PeriodicTask { let running = Arc::new(AtomicBool::new(true)); let c_running = running.clone(); - + #[cfg(unix)] let builder = ThreadBuilder::default() .name(name) @@ -54,7 +55,7 @@ impl PeriodicTask { // TODO: deal with windows realtime scheduling #[cfg(windows)] let builder = ThreadBuilder::default().name(name); - + let _ = builder.spawn(move |result| { if let 
Err(e) = result { #[cfg(windows)] @@ -79,7 +80,7 @@ impl PeriodicTask { let cycle_start = std::time::Instant::now(); f(); - + // sleep for next iteration let elapsed = cycle_start.elapsed(); if elapsed < interval { diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs index 5744a273a0..1b04ad313c 100644 --- a/commons/zenoh-shm/src/watchdog/storage.rs +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -11,12 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::BTreeSet, sync::{Arc, Mutex}, }; +use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; diff --git a/io/zenoh-transport/src/unicast/universal/reliability.rs b/io/zenoh-transport/src/unicast/universal/reliability.rs index b3637bee27..7aece8d077 100644 --- a/io/zenoh-transport/src/unicast/universal/reliability.rs +++ b/io/zenoh-transport/src/unicast/universal/reliability.rs @@ -11,15 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryInto; -use std::fmt; - -use super::common::seq_num::SeqNum; -use super::core::u64; +use std::{convert::TryInto, fmt}; use zenoh_result::{ZError, ZErrorKind, ZResult}; use zenoh_util::zerror; +use super::{common::seq_num::SeqNum, core::u64}; + pub(super) struct ReliabilityQueue { sn: SeqNum, index: usize, @@ -249,9 +247,10 @@ impl fmt::Debug for ReliabilityQueue { #[cfg(test)] mod tests { - use super::*; use rand::{thread_rng, Rng}; + use super::*; + #[test] fn reliability_queue_simple() { let size = 2; From 77654a0b16da29716faa311f3f8a4040b8338bf0 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Fri, 3 May 2024 15:17:18 +0300 Subject: [PATCH 323/598] fix after merge --- examples/examples/z_alloc_shm.rs | 4 +-- examples/examples/z_get_shm.rs | 3 +- zenoh/src/lib.rs | 55 +++++++++++++++++--------------- 3 files changed, 33 insertions(+), 29 
deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index a01de8d2fa..2db5e5a44e 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -40,14 +40,14 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION 1: Simple (default) configuration: - let simple_layout = shared_memory_provider + let simple_layout = provider .alloc_layout() .size(512) .res() .unwrap(); // OPTION 2: Comprehensive configuration: - let _comprehensive_layout = shared_memory_provider + let _comprehensive_layout = provider .alloc_layout() .size(512) .alignment(AllocAlignment::new(2)) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 7466f6eabc..39caf3a101 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e6cf1e437a..caf961984b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -378,32 +378,35 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::api::buffer::{ - zshm::{zshm, ZShm}, - zshmmut::{zshmmut, ZShmMut}, - }; - pub use zenoh_shm::api::client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }; - pub use zenoh_shm::api::client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}; - pub use zenoh_shm::api::common::types::{ChunkID, ProtocolID, SegmentID}; - pub use zenoh_shm::api::protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, - posix_shared_memory_provider_backend::{ - LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, - 
PosixSharedMemoryProviderBackendBuilder, + pub use zenoh_shm::api::{ + buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }, + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}, + common::types::{ChunkID, ProtocolID, SegmentID}, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::{ + LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, + PosixSharedMemoryProviderBackendBuilder, + }, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::{ + shared_memory_provider::{ + AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, + AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, + DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, + ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, + SharedMemoryProvider, SharedMemoryProviderBuilder, + SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, + StaticProtocolID, + }, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, }, - protocol_id::POSIX_PROTOCOL_ID, - }; - pub use zenoh_shm::api::provider::shared_memory_provider::{ - AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, - AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, - DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, - ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, SharedMemoryProvider, - SharedMemoryProviderBuilder, SharedMemoryProviderBuilderBackendID, - SharedMemoryProviderBuilderID, StaticProtocolID, - }; - pub use zenoh_shm::api::provider::types::{ - AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, }; } From 9fb1e1926139b6d297cb1d0cf347ea0d62a890d2 Mon Sep 17 
00:00:00 2001 From: yellowhatter Date: Fri, 3 May 2024 15:20:47 +0300 Subject: [PATCH 324/598] Update z_alloc_shm.rs --- examples/examples/z_alloc_shm.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 2db5e5a44e..93df5d821d 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -40,11 +40,7 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION 1: Simple (default) configuration: - let simple_layout = provider - .alloc_layout() - .size(512) - .res() - .unwrap(); + let simple_layout = provider.alloc_layout().size(512).res().unwrap(); // OPTION 2: Comprehensive configuration: let _comprehensive_layout = provider From 511bc67abbb672a0e7edebd0fa54dfeeeb60f457 Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 3 May 2024 16:14:28 +0300 Subject: [PATCH 325/598] Shm refine (#986) * [skip ci] SHM Payload API example and test * Add payload_mut to sample for zsliceshmmut deserialization * Improve SHM examples * Fix merge * Query/Reply shared memory examples * rename payload tests to bytes tests * - fix API exports - fix z_payload_shm example * Add attachment_mut to Sample * [skip ci] fix SHM exports in new api export mechanism * Massive renaming for ZSliceShm and ZSliceShmMut * fix ci * [skip ci] z_payload_shm -> z_bytes_shm * Polish SHM examples * fix lints * fix lint * fix after merge * Update z_alloc_shm.rs --------- Co-authored-by: Luca Cominardi --- .../src/api/{slice => buffer}/mod.rs | 4 +- .../src/api/{slice => buffer}/traits.rs | 0 .../{slice/zsliceshm.rs => buffer/zshm.rs} | 76 ++++----- .../zsliceshmmut.rs => buffer/zshmmut.rs} | 86 +++++------ commons/zenoh-shm/src/api/mod.rs | 2 +- .../api/provider/shared_memory_provider.rs | 12 +- commons/zenoh-shm/src/api/provider/types.rs | 4 +- 
examples/Cargo.toml | 22 ++- examples/examples/z_alloc_shm.rs | 40 ++--- examples/examples/z_bytes_shm.rs | 103 +++++++++++++ examples/examples/z_get_shm.rs | 144 ++++++++++++++++++ examples/examples/z_ping_shm.rs | 39 ++--- examples/examples/z_posix_shm_provider.rs | 44 ++++++ examples/examples/z_pub_shm.rs | 48 ++---- examples/examples/z_pub_shm_thr.rs | 37 ++--- examples/examples/z_queryable.rs | 7 +- examples/examples/z_queryable_shm.rs | 118 ++++++++++++++ examples/examples/z_sub_shm.rs | 39 +++-- zenoh/src/api/bytes.rs | 48 +++--- zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/query.rs | 5 + zenoh/src/api/queryable.rs | 37 +++-- zenoh/src/api/sample.rs | 13 ++ zenoh/src/api/session.rs | 12 +- zenoh/src/lib.rs | 30 +++- zenoh/src/net/runtime/adminspace.rs | 6 +- zenoh/src/prelude.rs | 4 +- zenoh/tests/bytes.rs | 69 +++++++++ zenoh/tests/payload.rs | 86 ----------- 29 files changed, 784 insertions(+), 357 deletions(-) rename commons/zenoh-shm/src/api/{slice => buffer}/mod.rs (92%) rename commons/zenoh-shm/src/api/{slice => buffer}/traits.rs (100%) rename commons/zenoh-shm/src/api/{slice/zsliceshm.rs => buffer/zshm.rs} (59%) rename commons/zenoh-shm/src/api/{slice/zsliceshmmut.rs => buffer/zshmmut.rs} (59%) create mode 100644 examples/examples/z_bytes_shm.rs create mode 100644 examples/examples/z_get_shm.rs create mode 100644 examples/examples/z_posix_shm_provider.rs create mode 100644 examples/examples/z_queryable_shm.rs create mode 100644 zenoh/tests/bytes.rs delete mode 100644 zenoh/tests/payload.rs diff --git a/commons/zenoh-shm/src/api/slice/mod.rs b/commons/zenoh-shm/src/api/buffer/mod.rs similarity index 92% rename from commons/zenoh-shm/src/api/slice/mod.rs rename to commons/zenoh-shm/src/api/buffer/mod.rs index 59c793f94a..8a3e040da9 100644 --- a/commons/zenoh-shm/src/api/slice/mod.rs +++ b/commons/zenoh-shm/src/api/buffer/mod.rs @@ -13,5 +13,5 @@ // pub mod traits; -pub mod zsliceshm; -pub mod zsliceshmmut; +pub mod zshm; +pub mod zshmmut; diff --git 
a/commons/zenoh-shm/src/api/slice/traits.rs b/commons/zenoh-shm/src/api/buffer/traits.rs similarity index 100% rename from commons/zenoh-shm/src/api/slice/traits.rs rename to commons/zenoh-shm/src/api/buffer/traits.rs diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshm.rs rename to commons/zenoh-shm/src/api/buffer/zshm.rs index b2ba611b3c..d6f34f293a 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshm.rs +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -20,44 +20,44 @@ use std::{ use zenoh_buffers::{ZBuf, ZSlice}; -use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; +use super::{traits::SHMBuf, zshmmut::zshmmut}; use crate::SharedMemoryBuf; -/// An immutable SHM slice +/// An immutable SHM buffer #[zenoh_macros::unstable_doc] #[repr(transparent)] #[derive(Clone, Debug, PartialEq, Eq)] -pub struct ZSliceShm(pub(crate) SharedMemoryBuf); +pub struct ZShm(pub(crate) SharedMemoryBuf); -impl SHMBuf for ZSliceShm { +impl SHMBuf for ZShm { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl PartialEq<&zsliceshm> for ZSliceShm { - fn eq(&self, other: &&zsliceshm) -> bool { +impl PartialEq<&zshm> for ZShm { + fn eq(&self, other: &&zshm) -> bool { self.0 == other.0 .0 } } -impl Borrow for ZSliceShm { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShm { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShm { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShm { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut 
are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShm { +impl Deref for ZShm { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -65,37 +65,37 @@ impl Deref for ZSliceShm { } } -impl AsRef<[u8]> for ZSliceShm { +impl AsRef<[u8]> for ZShm { fn as_ref(&self) -> &[u8] { self } } -impl From for ZSliceShm { +impl From for ZShm { fn from(value: SharedMemoryBuf) -> Self { Self(value) } } -impl From for ZSlice { - fn from(value: ZSliceShm) -> Self { +impl From for ZSlice { + fn from(value: ZShm) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: ZSliceShm) -> Self { +impl From for ZBuf { + fn from(value: ZShm) -> Self { value.0.into() } } -impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { +impl TryFrom<&mut ZShm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut ZSliceShm) -> Result { + fn try_from(value: &mut ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } @@ -104,64 +104,64 @@ impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { } } -/// A borrowed immutable SHM slice +/// A borrowed immutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshm(ZSliceShm); +pub struct zshm(ZShm); -impl ToOwned for zsliceshm { - type Owned = ZSliceShm; +impl ToOwned for zshm { + type Owned = ZShm; fn to_owned(&self) -> Self::Owned { self.0.clone() } } -impl PartialEq for &zsliceshm { - fn eq(&self, other: &ZSliceShm) -> bool { +impl PartialEq for &zshm { + fn eq(&self, other: &ZShm) -> bool { self.0 .0 == other.0 } } -impl Deref for 
zsliceshm { - type Target = ZSliceShm; +impl Deref for zshm { + type Target = ZShm; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshm { +impl DerefMut for zshm { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl From<&SharedMemoryBuf> for &zsliceshm { +impl From<&SharedMemoryBuf> for &zshm { fn from(value: &SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl From<&mut SharedMemoryBuf> for &mut zsliceshm { +impl From<&mut SharedMemoryBuf> for &mut zshm { fn from(value: &mut SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl TryFrom<&mut zsliceshm> for &mut zsliceshmmut { +impl TryFrom<&mut zshm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut zsliceshm) -> Result { + fn try_from(value: &mut zshm) -> Result { match value.0 .0.is_unique() && value.0 .0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshmmut.rs rename to commons/zenoh-shm/src/api/buffer/zshmmut.rs index d866e4173e..7341b7600c 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs +++ 
b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -19,37 +19,37 @@ use zenoh_buffers::{ZBuf, ZSlice}; use super::{ traits::{SHMBuf, SHMBufMut}, - zsliceshm::{zsliceshm, ZSliceShm}, + zshm::{zshm, ZShm}, }; use crate::SharedMemoryBuf; -/// A mutable SHM slice +/// A mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[repr(transparent)] -pub struct ZSliceShmMut(SharedMemoryBuf); +pub struct ZShmMut(SharedMemoryBuf); -impl SHMBuf for ZSliceShmMut { +impl SHMBuf for ZShmMut { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl SHMBufMut for ZSliceShmMut {} +impl SHMBufMut for ZShmMut {} -impl ZSliceShmMut { +impl ZShmMut { pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { Self(data) } } -impl PartialEq for &ZSliceShmMut { - fn eq(&self, other: &zsliceshmmut) -> bool { +impl PartialEq for &ZShmMut { + fn eq(&self, other: &zshmmut) -> bool { self.0 == other.0 .0 } } -impl TryFrom for ZSliceShmMut { +impl TryFrom for ZShmMut { type Error = SharedMemoryBuf; fn try_from(value: SharedMemoryBuf) -> Result { @@ -60,10 +60,10 @@ impl TryFrom for ZSliceShmMut { } } -impl TryFrom for ZSliceShmMut { - type Error = ZSliceShm; +impl TryFrom for ZShmMut { + type Error = ZShm; - fn try_from(value: ZSliceShm) -> Result { + fn try_from(value: ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => Ok(Self(value.0)), false => Err(value), @@ -71,39 +71,39 @@ impl TryFrom for ZSliceShmMut { } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm 
and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShmMut { +impl Deref for ZShmMut { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -111,75 +111,75 @@ impl Deref for ZSliceShmMut { } } -impl DerefMut for ZSliceShmMut { +impl DerefMut for ZShmMut { fn deref_mut(&mut self) -> &mut Self::Target { self.0.as_mut() } } -impl AsRef<[u8]> for ZSliceShmMut { +impl AsRef<[u8]> for ZShmMut { fn as_ref(&self) -> &[u8] { self } } -impl AsMut<[u8]> for ZSliceShmMut { +impl AsMut<[u8]> for ZShmMut { fn as_mut(&mut self) -> &mut [u8] { self } } -impl From for ZSliceShm { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZShm { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZSlice { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZSlice { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: 
ZSliceShmMut) -> Self { +impl From for ZBuf { + fn from(value: ZShmMut) -> Self { value.0.into() } } -/// A borrowed mutable SHM slice +/// A borrowed mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshmmut(ZSliceShmMut); +pub struct zshmmut(ZShmMut); -impl PartialEq for &zsliceshmmut { - fn eq(&self, other: &ZSliceShmMut) -> bool { +impl PartialEq for &zshmmut { + fn eq(&self, other: &ZShmMut) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshmmut { - type Target = ZSliceShmMut; +impl Deref for zshmmut { + type Target = ZShmMut; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshmmut { +impl DerefMut for zshmmut { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl TryFrom<&mut SharedMemoryBuf> for &mut zsliceshmmut { +impl TryFrom<&mut SharedMemoryBuf> for &mut zshmmut { type Error = (); fn try_from(value: &mut SharedMemoryBuf) -> Result { match value.is_unique() && value.is_valid() { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction true => Ok(unsafe { core::mem::transmute(value) }), false => Err(()), diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs index 08a5678fa8..a87188da29 100644 --- a/commons/zenoh-shm/src/api/mod.rs +++ b/commons/zenoh-shm/src/api/mod.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // +pub mod buffer; pub mod client; pub mod client_storage; pub mod common; pub mod protocol_implementations; pub mod provider; -pub mod slice; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 58109a699d..1ca560f07e 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ 
b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -28,7 +28,7 @@ use super::{ types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, }; use crate::{ - api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, + api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, header::{ allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, storage::GLOBAL_HEADER_STORAGE, @@ -712,11 +712,11 @@ where self.backend.defragment() } - /// Map externally-allocated chunk into ZSliceShmMut. + /// Map externally-allocated chunk into ZShmMut. /// This method is designed to be used with push data sources. /// Remember that chunk's len may be >= len! #[zenoh_macros::unstable_doc] - pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { // allocate resources for SHM buffer let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; @@ -728,7 +728,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } /// Try to collect free chunks. 
@@ -805,7 +805,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } fn alloc_resources() -> ZResult<( @@ -910,6 +910,6 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } } diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index ddf949ee75..beae24bfb7 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -17,7 +17,7 @@ use std::fmt::Display; use zenoh_result::{bail, ZResult}; use super::chunk::AllocatedChunk; -use crate::api::slice::zsliceshmmut::ZSliceShmMut; +use crate::api::buffer::zshmmut::ZShmMut; /// Allocation errors /// @@ -169,4 +169,4 @@ pub type ChunkAllocResult = Result; /// SHM buffer allocation result #[zenoh_macros::unstable_doc] -pub type BufAllocResult = Result; +pub type BufAllocResult = Result; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e117507ae9..90281ae558 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,6 +100,11 @@ path = "examples/z_pull.rs" name = "z_queryable" path = "examples/z_queryable.rs" +[[example]] +name = "z_queryable_shm" +path = "examples/z_queryable_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_storage" path = "examples/z_storage.rs" @@ -108,6 +113,11 @@ path = "examples/z_storage.rs" name = "z_get" path = "examples/z_get.rs" +[[example]] +name = "z_get_shm" +path = "examples/z_get_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_forward" path = "examples/z_forward.rs" @@ -156,4 +166,14 @@ path = "examples/z_pong.rs" [[example]] name = "z_alloc_shm" path = "examples/z_alloc_shm.rs" -required-features = ["unstable", "shared-memory"] \ No newline at end of file +required-features = ["unstable", "shared-memory"] 
+ +[[example]] +name = "z_bytes_shm" +path = "examples/z_bytes_shm.rs" +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_posix_shm_provider" +path = "examples/z_posix_shm_provider.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index acff39379c..93df5d821d 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -21,29 +21,15 @@ async fn main() { } async fn run() -> ZResult<()> { - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(65536, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(65536) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -54,14 +40,10 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION 1: Simple (default) configuration: - let simple_layout = shared_memory_provider - .alloc_layout() - .size(512) - .res() - .unwrap(); + let simple_layout = provider.alloc_layout().size(512).res().unwrap(); // OPTION 2: Comprehensive configuration: - let _comprehensive_layout = shared_memory_provider + let _comprehensive_layout = provider .alloc_layout() .size(512) .alignment(AllocAlignment::new(2)) diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs new file mode 100644 index 0000000000..5c582e56e6 --- /dev/null +++ b/examples/examples/z_bytes_shm.rs @@ -0,0 +1,103 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::{ + bytes::ZBytes, + shm::{ + zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, + ZShmMut, POSIX_PROTOCOL_ID, + }, +}; + +fn main() { + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut owned_shm_buf_mut = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); + + // immutable API + let _data: &[u8] = &owned_shm_buf; + + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let mut owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + // branch to illustrate immutable access to SHM data + { + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // construct owned buffer from borrowed type (&zshm -> ZShm) + let owned = borrowed_shm_buf.to_owned(); + + // immutable API + let _data: &[u8] = &owned; + + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) + } + + // branch to illustrate mutable 
access to SHM data + { + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = borrowed_shm_buf_mut; + let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + } +} diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs new file mode 100644 index 0000000000..39caf3a101 --- /dev/null +++ b/examples/examples/z_get_shm.rs @@ -0,0 +1,144 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::time::Duration; + +use clap::Parser; +use zenoh::prelude::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, selector, mut value, target, timeout) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).await.unwrap(); + + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + println!("Allocating Shared Memory Buffer..."); + let mut sbuf = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + let content = value + .take() + .unwrap_or_else(|| "Get from SharedMemory Rust!".to_string()); + sbuf[0..content.len()].copy_from_slice(content.as_bytes()); + + println!("Sending Query '{selector}'..."); + let replies = session + .get(&selector) + .value(sbuf) + .target(target) + .timeout(timeout) + .await + .unwrap(); + + while let Ok(reply) = replies.recv_async().await { + match reply.result() { + Ok(sample) => { + print!(">> Received ('{}': ", sample.key_expr().as_str()); + match sample.payload().deserialize::<&zshm>() { + Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), + Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), + } + } + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } + } + } +} + +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[value(rename_all = "SCREAMING_SNAKE_CASE")] +enum Qt { + BestMatching, + All, + AllComplete, +} + +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, 
default_value = "demo/example/**")] + /// The selection of resources to query + selector: Selector<'static>, + /// The value to publish. + value: Option, + #[arg(short, long, default_value = "BEST_MATCHING")] + /// The target queryables of the query. + target: Qt, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. + timeout: u64, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> ( + Config, + Selector<'static>, + Option, + QueryTarget, + Duration, +) { + let args = Args::parse(); + ( + args.common.into(), + args.selector, + args.value, + match args.target { + Qt::BestMatching => QueryTarget::BestMatching, + Qt::All => QueryTarget::All, + Qt::AllComplete => QueryTarget::AllComplete, + }, + Duration::from_millis(args.timeout), + ) +} diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index d4c5b4f162..4c3ad4ed40 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -45,34 +45,23 @@ fn main() { let mut samples = Vec::with_capacity(n); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. 
This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let buf = provider .alloc_layout() .size(size) .res() @@ -81,7 +70,7 @@ fn main() { .res() .unwrap(); - // convert ZSliceShmMut into ZSlice as ZSliceShmMut does not support Clone + // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); // -- warmup -- diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs new file mode 100644 index 0000000000..cdf502bc61 --- /dev/null +++ b/examples/examples/z_posix_shm_provider.rs @@ -0,0 +1,44 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// 
http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::*; + +fn main() { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + + // Total amount of shared memory to allocate + let size = 4096; + + // An alignment for POSIX SHM provider + // Due to internal optimization, all allocations will be aligned corresponding to this alignment, + // so the provider will be able to satisfy allocation layouts with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // A layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + // Build a provider backend + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let _shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); +} diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 92d19b6b06..d2a87a59cc 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -16,7 +16,6 @@ use zenoh::prelude::*; use zenoh_examples::CommonArgs; const N: usize = 10; -const K: u32 = 3; #[tokio::main] async fn main() -> Result<(), ZError> { @@ -33,46 +32,31 @@ async fn main() -> Result<(), ZError> { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
- // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); let publisher = session.declare_publisher(&path).await.unwrap(); + // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider - .alloc_layout() - .size(1024) - .res() - .unwrap(); + let layout = provider.alloc_layout().size(1024).res().unwrap(); println!("Press CTRL-C to quit..."); - for idx in 0..(K * N as u32) { + for idx in 0..u32::MAX { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + // Allocate particular SHM buffer using pre-created layout let mut sbuf = layout .alloc() .with_policy::>() diff --git a/examples/examples/z_pub_shm_thr.rs 
b/examples/examples/z_pub_shm_thr.rs index 0b94304321..0d44fbe6ee 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -28,34 +28,23 @@ async fn main() { let z = zenoh::open(config).await.unwrap(); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(sm_size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(sm_size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let mut buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut buf = provider .alloc_layout() .size(size) .res() diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index e24b8e80cb..dcdca82c09 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -20,7 +20,12 @@ async fn main() { // initiate logging zenoh_util::try_init_log_from_env(); - let (config, key_expr, value, complete) = parse_args(); + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
+ config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs new file mode 100644 index 0000000000..ed2320d2c5 --- /dev/null +++ b/examples/examples/z_queryable_shm.rs @@ -0,0 +1,118 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use zenoh::prelude::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).await.unwrap(); + + println!("Creating POSIX SHM provider..."); + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + println!("Declaring Queryable on '{key_expr}'..."); + let queryable = session + .declare_queryable(&key_expr) + .complete(complete) + .await + .unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(query) = queryable.recv_async().await { + print!( + ">> [Queryable] Received Query '{}' ('{}'", + query.selector(), + query.key_expr().as_str(), + ); + if let Some(payload) = query.payload() { + match payload.deserialize::<&zshm>() { + Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), + Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), + } + } + println!(")"); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + println!("Allocating Shared Memory Buffer..."); + let mut sbuf = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + sbuf[0..value.len()].copy_from_slice(value.as_bytes()); + + println!( + ">> [Queryable] Responding ('{}': '{}')", + key_expr.as_str(), + value, + ); + query + .reply(key_expr.clone(), sbuf) + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); + } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] + /// The key expression matching queries to reply to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "Queryable from SharedMemory Rust!")] + /// The value to reply to queries. 
+ value: String, + #[arg(long)] + /// Declare the queryable as complete w.r.t. the key expression. + complete: bool, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { + let args = Args::parse(); + (args.common.into(), args.key, args.value, args.complete) +} diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 9914539ed5..bab31d4a2a 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -35,18 +35,37 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::<&zsliceshm>() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } + print!( + ">> [Subscriber] Received {} ('{}': ", + sample.kind(), + sample.key_expr().as_str(), + ); + match sample.payload().deserialize::<&zshm>() { + Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), + Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), } + println!(")"); } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. 
+ // + // use zenoh::shm::zshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index fb32910b54..98afd1a3c3 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -30,9 +30,9 @@ use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::{ - api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + api::buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, }, SharedMemoryBuf, }; @@ -1526,47 +1526,47 @@ impl TryFrom<&mut ZBytes> for serde_pickle::Value { // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShm) -> Self::Output { + fn serialize(self, t: ZShm) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShm) -> Self { +impl From for ZBytes { + fn from(t: ZShm) -> Self { ZSerde.serialize(t) } } // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShmMut) -> Self::Output { + fn serialize(self, t: ZShmMut) -> Self::Output { let slice: ZSlice = t.into(); 
ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShmMut) -> Self { +impl From for ZBytes { + fn from(t: ZShmMut) -> Self { ZSerde.serialize(t) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a zshm> for ZSerde { type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { - // A ZSliceShm is expected to have only one slice + fn deserialize(self, v: Self::Input) -> Result<&'a zshm, Self::Error> { + // A ZShm is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { if let Some(shmb) = zs.downcast_ref::() { @@ -1578,7 +1578,7 @@ impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { +impl<'a> TryFrom<&'a ZBytes> for &'a zshm { type Error = ZDeserializeError; fn try_from(value: &'a ZBytes) -> Result { @@ -1587,7 +1587,7 @@ impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1596,11 +1596,11 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshm, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = 
v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1613,11 +1613,11 @@ impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshmmut, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1630,7 +1630,7 @@ impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1834,12 +1834,12 @@ mod tests { use zenoh_protocol::core::Properties; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ + buffer::zshm::{zshm, ZShm}, protocol_implementations::posix::{ posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zsliceshm::{zsliceshm, ZSliceShm}, }; use super::ZBytes; @@ -1967,9 +1967,9 @@ mod tests { let mutable_shm_buf = layout.alloc().res().unwrap(); // convert to immutable SHM buffer - let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + let immutable_shm_buf: ZShm = mutable_shm_buf.into(); - serialize_deserialize!(&zsliceshm, immutable_shm_buf); + serialize_deserialize!(&zshm, immutable_shm_buf); } // Properties diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index f1be92c7ac..29c65f837e 100644 --- a/zenoh/src/api/encoding.rs +++ 
b/zenoh/src/api/encoding.rs @@ -17,7 +17,7 @@ use phf::phf_map; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] -use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; +use zenoh_shm::api::buffer::{zshm::ZShm, zshmmut::ZShmMut}; use super::bytes::ZBytes; @@ -837,10 +837,10 @@ impl EncodingMapping for serde_pickle::Value { // - Zenoh SHM #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShm { +impl EncodingMapping for ZShm { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShmMut { +impl EncodingMapping for ZShmMut { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e344237087..66de2e5700 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -93,6 +93,11 @@ impl Reply { self.result.as_ref() } + /// Gets the a mutable borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. + pub fn result_mut(&mut self) -> Result<&mut Sample, &mut Value> { + self.result.as_mut() + } + /// Converts this `Reply` into the its result. Use [`Reply::result`] it you don't want to take ownership. pub fn into_result(self) -> Result { self.result diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e2343811db..0653c4433d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -51,18 +51,11 @@ use super::{ use crate::net::primitives::Primitives; pub(crate) struct QueryInner { - /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, - /// This Query's selector parameters. pub(crate) parameters: Parameters<'static>, - /// This Query's body. 
- pub(crate) value: Option, - pub(crate) qid: RequestId, pub(crate) zid: ZenohId, pub(crate) primitives: Arc, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -80,6 +73,9 @@ impl Drop for QueryInner { pub struct Query { pub(crate) inner: Arc, pub(crate) eid: EntityId, + pub(crate) value: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl Query { @@ -107,24 +103,43 @@ impl Query { /// This Query's value. #[inline(always)] pub fn value(&self) -> Option<&Value> { - self.inner.value.as_ref() + self.value.as_ref() + } + + /// This Query's value. + #[inline(always)] + pub fn value_mut(&mut self) -> Option<&mut Value> { + self.value.as_mut() } /// This Query's payload. #[inline(always)] pub fn payload(&self) -> Option<&ZBytes> { - self.inner.value.as_ref().map(|v| &v.payload) + self.value.as_ref().map(|v| &v.payload) + } + + /// This Query's payload. + #[inline(always)] + pub fn payload_mut(&mut self) -> Option<&mut ZBytes> { + self.value.as_mut().map(|v| &mut v.payload) } /// This Query's encoding. #[inline(always)] pub fn encoding(&self) -> Option<&Encoding> { - self.inner.value.as_ref().map(|v| &v.encoding) + self.value.as_ref().map(|v| &v.encoding) } + /// This Query's attachment. #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&ZBytes> { - self.inner.attachment.as_ref() + self.attachment.as_ref() + } + + /// This Query's attachment. + #[zenoh_macros::unstable] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() } /// Sends a reply in the form of [`Sample`] to this Query. diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 2551a2a0d9..f70f024677 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -306,6 +306,12 @@ impl Sample { &self.payload } + /// Gets the payload of this Sample. + #[inline] + pub fn payload_mut(&mut self) -> &mut ZBytes { + &mut self.payload + } + /// Gets the kind of this Sample. 
#[inline] pub fn kind(&self) -> SampleKind { @@ -352,6 +358,13 @@ impl Sample { pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } + + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. + #[zenoh_macros::unstable] + #[inline] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() + } } impl From for Value { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 2e718ecccb..018a3a085e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1806,10 +1806,6 @@ impl Session { let query_inner = Arc::new(QueryInner { key_expr, parameters: parameters.to_owned().into(), - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), - }), qid, zid, primitives: if local { @@ -1817,13 +1813,17 @@ impl Session { } else { primitives }, - #[cfg(feature = "unstable")] - attachment, }); for (eid, callback) in queryables { callback(Query { inner: query_inner.clone(), eid, + value: body.as_ref().map(|b| Value { + payload: b.payload.clone().into(), + encoding: b.encoding.clone().into(), + }), + #[cfg(feature = "unstable")] + attachment: attachment.clone(), }); } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 58e17fc2ea..caf961984b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -379,20 +379,34 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { pub use zenoh_shm::api::{ - client_storage::SharedMemoryClientStorage, + buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }, + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}, + common::types::{ChunkID, ProtocolID, SegmentID}, protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + 
posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::{ + LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, + PosixSharedMemoryProviderBackendBuilder, + }, protocol_id::POSIX_PROTOCOL_ID, }, provider::{ shared_memory_provider::{ - BlockOn, Deallocate, Defragment, GarbageCollect, SharedMemoryProviderBuilder, + AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, + AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, + DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, + ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, + SharedMemoryProvider, SharedMemoryProviderBuilder, + SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, + StaticProtocolID, }, - types::{AllocAlignment, MemoryLayout}, - }, - slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, }, }; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 8b53692ead..62f6b7c8b4 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -466,14 +466,14 @@ impl Primitives for AdminSpace { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), parameters: query.parameters.into(), - value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), qid: msg.id, zid, primitives, - #[cfg(feature = "unstable")] - attachment: query.ext_attachment.map(Into::into), }), eid: self.queryable_id, + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), }; for (key, handler) in &self.handlers { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 54418d9f78..2ed94e6f47 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -26,7 +26,7 @@ // 
Reexport API in flat namespace pub(crate) mod flat { - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm::*; pub use crate::{ buffers::*, @@ -51,7 +51,7 @@ pub(crate) mod flat { // Reexport API in hierarchical namespace pub(crate) mod mods { - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm; pub use crate::{ buffers, bytes, config, core, encoding, handlers, key_expr, publication, query, queryable, diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs new file mode 100644 index 0000000000..6de12ab63f --- /dev/null +++ b/zenoh/tests/bytes.rs @@ -0,0 +1,69 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +#[test] +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +fn shm_bytes_single_buf() { + use zenoh::prelude::*; + + // create an SHM backend... 
+ let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer (ZShmMut) + let owned_shm_buf_mut = layout.alloc().res().unwrap(); + + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); + + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); + + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + // branch to illustrate immutable access to SHM data + { + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); + + // construct owned buffer from borrowed type (&zshm -> ZShm) + let owned = borrowed_shm_buf.to_owned(); + + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) + } + + // branch to illustrate mutable access to SHM data + { + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); + + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let _borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); + } +} diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs deleted file mode 100644 index fecf10a608..0000000000 --- a/zenoh/tests/payload.rs +++ /dev/null @@ -1,86 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made 
available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -#[test] -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -fn shm_payload_single_buf() { - use zenoh::prelude::*; - - // create an SHM backend... - let backend = PosixSharedMemoryProviderBackend::builder() - .with_size(4096) - .unwrap() - .res() - .unwrap(); - // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() - .protocol_id::() - .backend(backend) - .res(); - - // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); - - // allocate an SHM buffer - let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); - - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - - // convert into immutable owned buffer - let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); - - // get data - let _data: &[u8] = &owned_shm_buf; - - // convert again into mutable owned buffer - let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - - // build a ZBytes from an SHM buffer - let mut payload: ZBytes = owned_shm_buf_mut.into(); - - { - // deserialize ZBytes as borrowed zsliceshm - let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf; - - // construct owned buffer from borrowed type - let owned = borrowed_shm_buf.to_owned(); - - // get data - let _data: &[u8] = &owned; - } - - { - // deserialize ZBytes as mutably borrowed zsliceshm - let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); - - 
// get data - let _data: &[u8] = borrowed_shm_buf; - - // convert zsliceshm to zsliceshmmut - let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf_mut; - let _data_mut: &mut [u8] = borrowed_shm_buf_mut; - } -} From 6338c634d9ac7e0649cffc7e2eec5a8329c13419 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Mon, 6 May 2024 16:19:40 +0300 Subject: [PATCH 326/598] Refine SHM alloc API --- .../api/provider/shared_memory_provider.rs | 194 ++++++++++++------ commons/zenoh-shm/src/api/provider/types.rs | 26 +++ examples/examples/z_alloc_shm.rs | 53 ++++- examples/examples/z_bytes_shm.rs | 9 +- examples/examples/z_get_shm.rs | 6 +- examples/examples/z_ping_shm.rs | 9 +- examples/examples/z_pub_shm.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 9 +- examples/examples/z_queryable_shm.rs | 6 +- io/zenoh-transport/tests/unicast_shm.rs | 2 +- zenoh/src/api/bytes.rs | 2 +- zenoh/src/lib.rs | 14 +- zenoh/tests/bytes.rs | 2 +- zenoh/tests/shm.rs | 2 +- 14 files changed, 212 insertions(+), 124 deletions(-) diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 1ca560f07e..658c96e162 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -25,7 +25,10 @@ use zenoh_result::ZResult; use super::{ chunk::{AllocatedChunk, ChunkDescriptor}, shared_memory_provider_backend::SharedMemoryProviderBackend, - types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, + types::{ + AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, + ZAllocError, ZLayoutAllocError, ZLayoutError, + }, }; use crate::{ api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, @@ -64,83 +67,69 @@ impl BusyChunk { } } -/// Builder to create AllocLayout -#[zenoh_macros::unstable_doc] -pub struct 
AllocLayoutBuilder<'a, IDSource, Backend> +struct AllocData<'a, IDSource, Backend> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, { + size: usize, + alignment: AllocAlignment, provider: &'a SharedMemoryProvider, } -impl<'a, IDSource, Backend> AllocLayoutBuilder<'a, IDSource, Backend> -where - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - /// Set size for layout - #[zenoh_macros::unstable_doc] - pub fn size(self, size: usize) -> AllocLayoutSizedBuilder<'a, IDSource, Backend> { - AllocLayoutSizedBuilder { - provider: self.provider, - size, - } - } -} #[zenoh_macros::unstable_doc] -pub struct AllocLayoutSizedBuilder<'a, IDSource, Backend> +pub struct AllocLayoutSizedBuilder<'a, IDSource, Backend>(AllocData<'a, IDSource, Backend>) where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - provider: &'a SharedMemoryProvider, - size: usize, -} + Backend: SharedMemoryProviderBackend; + impl<'a, IDSource, Backend> AllocLayoutSizedBuilder<'a, IDSource, Backend> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, { - /// Set alignment for layout + fn new(provider: &'a SharedMemoryProvider, size: usize) -> Self { + Self(AllocData { + provider, + size, + alignment: AllocAlignment::default(), + }) + } + + /// Set alignment #[zenoh_macros::unstable_doc] - pub fn alignment( - self, - alignment: AllocAlignment, - ) -> AllocLayoutAlignedBuilder<'a, IDSource, Backend> { - AllocLayoutAlignedBuilder { - provider: self.provider, - size: self.size, + pub fn with_alignment(self, alignment: AllocAlignment) -> Self { + Self(AllocData { + provider: self.0.provider, + size: self.0.size, alignment, - } + }) } - /// try to build an allocation layout + /// Try to build an allocation layout #[zenoh_macros::unstable_doc] - pub fn res(self) -> ZResult> { - AllocLayout::new(self.size, AllocAlignment::default(), self.provider) + pub fn make_layout(self) -> Result, ZLayoutError> { + AllocLayout::new(self.0) } 
-} -#[zenoh_macros::unstable_doc] -pub struct AllocLayoutAlignedBuilder<'a, IDSource, Backend> -where - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - provider: &'a SharedMemoryProvider, - size: usize, - alignment: AllocAlignment, -} -impl<'a, IDSource, Backend> AllocLayoutAlignedBuilder<'a, IDSource, Backend> -where - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - /// Try to build layout with specified args + /// Set the allocation policy #[zenoh_macros::unstable_doc] - pub fn res(self) -> ZResult> { - AllocLayout::new(self.size, self.alignment, self.provider) + pub fn with_policy(self) -> AllocBuilder2<'a, IDSource, Backend, Policy> { + AllocBuilder2 { + data: self.0, + _phantom: PhantomData, + } + } + + /// Get the result + #[zenoh_macros::unstable_doc] + pub fn res(self) -> BufLayoutAllocResult { + let builder = AllocBuilder2::<'a, IDSource, Backend, JustAlloc> { + data: self.0, + _phantom: PhantomData, + }; + + builder.res() } } @@ -173,24 +162,25 @@ where } } - fn new( - size: usize, - alignment: AllocAlignment, - provider: &'a SharedMemoryProvider, - ) -> ZResult { + fn new(data: AllocData<'a, IDSource, Backend>) -> Result { // NOTE: Depending on internal implementation, provider's backend might relayout // the allocations for bigger alignment (ex. 
4-byte aligned allocation to 8-bytes aligned) // Create layout for specified arguments - let layout = MemoryLayout::new(size, alignment)?; + let layout = MemoryLayout::new(data.size, data.alignment) + .map_err(|_| ZLayoutError::IncorrectLayoutArgs)?; // Obtain provider's layout for our layout - let provider_layout = provider.backend.layout_for(layout)?; + let provider_layout = data + .provider + .backend + .layout_for(layout) + .map_err(|_| ZLayoutError::ProviderIncompatibleLayout)?; Ok(Self { - size, + size: data.size, provider_layout, - provider, + provider: data.provider, }) } } @@ -511,6 +501,75 @@ unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBack } }*/ +/// Builder for allocations +#[zenoh_macros::unstable_doc] +pub struct AllocBuilder2< + 'a, + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy = JustAlloc, +> { + data: AllocData<'a, IDSource, Backend>, + _phantom: PhantomData, +} + +// Generic impl +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder2<'a, IDSource, Backend, OtherPolicy> { + AllocBuilder2 { + data: self.data, + _phantom: PhantomData, + } + } +} + +// Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy: AllocPolicy, +{ + /// Get the result + #[zenoh_macros::unstable_doc] + pub fn res(self) -> BufLayoutAllocResult { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + + layout + .alloc() + .with_policy::() + .res() + .map_err(ZLayoutAllocError::Alloc) + } +} + +// Async Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource + Send + Sync, + 
Backend: SharedMemoryProviderBackend + Sync, + Policy: AsyncAllocPolicy, +{ + /// Get the async result + #[zenoh_macros::unstable_doc] + pub async fn res_async(self) -> BufLayoutAllocResult { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + + layout + .alloc() + .with_policy::() + .res_async() + .await + .map_err(ZLayoutAllocError::Alloc) + } +} + /// Builder for allocations #[zenoh_macros::unstable_doc] pub struct AllocBuilder< @@ -699,11 +758,10 @@ where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, { - /// Create layout builder associated with particular SharedMemoryProvider. - /// Layout is a rich interface to make allocations + /// Rich interface for making allocations #[zenoh_macros::unstable_doc] - pub fn alloc_layout(&self) -> AllocLayoutBuilder { - AllocLayoutBuilder { provider: self } + pub fn alloc(&self, size: usize) -> AllocLayoutSizedBuilder { + AllocLayoutSizedBuilder::new(self, size) } /// Defragment memory diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index beae24bfb7..6e8ced7fc8 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -163,6 +163,17 @@ impl MemoryLayout { } } +/// Layouting errors +/// +/// IncorrectLayoutArgs: layout arguments are incorrect +/// ProviderIncompatibleLayout: layout incompatible with provider +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZLayoutError { + IncorrectLayoutArgs, + ProviderIncompatibleLayout, +} + /// SHM chunk allocation result #[zenoh_macros::unstable_doc] pub type ChunkAllocResult = Result; @@ -170,3 +181,18 @@ pub type ChunkAllocResult = Result; /// SHM buffer allocation result #[zenoh_macros::unstable_doc] pub type BufAllocResult = Result; + +/// Layouting and allocation errors +/// +/// Alloc: allocation error +/// Layout: layouting error +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZLayoutAllocError { + 
Alloc(ZAllocError), + Layout(ZLayoutError), +} + +/// SHM buffer layouting and allocation result +#[zenoh_macros::unstable_doc] +pub type BufLayoutAllocResult = Result; diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 93df5d821d..d7d19de7f8 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -34,20 +34,51 @@ async fn run() -> ZResult<()> { .backend(backend) .res(); + // There are two API-defined ways of making shm buffer allocations: direct and through the layout... + + // Direct allocation + // The direct allocation calcualtes all layouting checks on each allocation. It is good for making + // uniquely-layouted allocations. For making series of similar allocations, please refer to layout + // allocation API which is shown later in this example... + let _direct_allocation = { + // OPTION: Simple allocation + let simple = provider.alloc(512).res().unwrap(); + + // OPTION: Allocation with custom alignemnt and alloc policy customization + let _comprehensive = provider + .alloc(512) + .with_alignment(AllocAlignment::new(2)) + // for more examples on policies, please see allocation policy usage below (for layout allocation API) + .with_policy::() + .res() + .unwrap(); + + // OPTION: Allocation with custom alignemnt and async alloc policy + let _async = provider + .alloc(512) + .with_alignment(AllocAlignment::new(2)) + // for more examples on policies, please see allocation policy usage below (for layout allocation API) + .with_policy::>>() + .res_async() + .await + .unwrap(); + + simple + }; + // Create a layout for particular allocation arguments and particular SHM provider // The layout is validated for argument correctness and also is checked // against particular SHM provider's layouting capabilities. 
// This layout is reusable and can handle series of similar allocations let buffer_layout = { - // OPTION 1: Simple (default) configuration: - let simple_layout = provider.alloc_layout().size(512).res().unwrap(); + // OPTION: Simple configuration: + let simple_layout = provider.alloc(512).make_layout().unwrap(); - // OPTION 2: Comprehensive configuration: + // OPTION: Comprehensive configuration: let _comprehensive_layout = provider - .alloc_layout() - .size(512) - .alignment(AllocAlignment::new(2)) - .res() + .alloc(512) + .with_alignment(AllocAlignment::new(2)) + .make_layout() .unwrap(); simple_layout @@ -69,10 +100,10 @@ async fn run() -> ZResult<()> { let mut sbuf = async { // Some examples on how to use layout's interface: - // The default allocation with default JustAlloc policy + // OPTION: The default allocation with default JustAlloc policy let default_alloc = buffer_layout.alloc().res().unwrap(); - // The async allocation + // OPTION: The async allocation let _async_alloc = buffer_layout .alloc() .with_policy::() @@ -80,14 +111,14 @@ async fn run() -> ZResult<()> { .await .unwrap(); - // The comprehensive allocation policy that blocks if provider is not able to allocate + // OPTION: The comprehensive allocation policy that blocks if provider is not able to allocate let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() .res() .unwrap(); - // The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate + // OPTION: The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 5c582e56e6..970ff2bae4 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -36,14 +36,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs 
example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut owned_shm_buf_mut = provider - .alloc_layout() - .size(1024) - .res() - .unwrap() - .alloc() - .res() - .unwrap(); + let mut owned_shm_buf_mut = provider.alloc(1024).res().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 39caf3a101..19e66b09f8 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -53,11 +53,7 @@ async fn main() { // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); let mut sbuf = provider - .alloc_layout() - .size(1024) - .res() - .unwrap() - .alloc() + .alloc(1024) .with_policy::>() .res_async() .await diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 4c3ad4ed40..c53669fc44 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -61,14 +61,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let buf = provider - .alloc_layout() - .size(size) - .res() - .unwrap() - .alloc() - .res() - .unwrap(); + let buf = provider.alloc(size).res().unwrap(); // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index d2a87a59cc..f07341a088 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), ZError> { // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = provider.alloc_layout().size(1024).res().unwrap(); + let layout = provider.alloc(1024).make_layout().unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { diff --git 
a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 0d44fbe6ee..47b54b0589 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -44,14 +44,7 @@ async fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut buf = provider - .alloc_layout() - .size(size) - .res() - .unwrap() - .alloc() - .res() - .unwrap(); + let mut buf = provider.alloc(size).res().unwrap(); for b in buf.as_mut() { *b = rand::random::(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index ed2320d2c5..685b162a5a 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -73,11 +73,7 @@ async fn main() { // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); let mut sbuf = provider - .alloc_layout() - .size(1024) - .res() - .unwrap() - .alloc() + .alloc(1024) .with_policy::>() .res_async() .await diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index f7b884f6b9..981b856235 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -241,7 +241,7 @@ mod tests { ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); - let layout = shm01.alloc_layout().size(MSG_SIZE).res().unwrap(); + let layout = shm01.alloc(MSG_SIZE).make_layout().unwrap(); // Send the message println!("Transport SHM [3a]"); diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 98afd1a3c3..3857019215 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1961,7 +1961,7 @@ mod tests { .res(); // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); + let layout = 
provider.alloc(1024).make_layout().unwrap(); // allocate an SHM buffer let mutable_shm_buf = layout.alloc().res().unwrap(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index caf961984b..e48388f5e7 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -398,15 +398,17 @@ pub mod shm { }, provider::{ shared_memory_provider::{ - AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, - AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, - DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, - ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, - SharedMemoryProvider, SharedMemoryProviderBuilder, + AllocBuilder, AllocBuilder2, AllocLayout, AllocLayoutSizedBuilder, AllocPolicy, + AsyncAllocPolicy, BlockOn, DeallocEldest, DeallocOptimal, DeallocYoungest, + Deallocate, Defragment, DynamicProtocolID, ForceDeallocPolicy, GarbageCollect, + JustAlloc, ProtocolIDSource, SharedMemoryProvider, SharedMemoryProviderBuilder, SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, StaticProtocolID, }, - types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, + types::{ + AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, + MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, + }, }, }; } diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 6de12ab63f..0f26625fba 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -30,7 +30,7 @@ fn shm_bytes_single_buf() { .res(); // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); + let layout = provider.alloc(1024).make_layout().unwrap(); // allocate an SHM buffer (ZShmMut) let owned_shm_buf_mut = layout.alloc().res().unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 14f6985414..9c71126138 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -122,7 +122,7 @@ mod tests { let shm_segment_size = 
shm01.available(); // Prepare a layout for allocations - let layout = shm01.alloc_layout().size(size).res().unwrap(); + let layout = shm01.alloc(size).make_layout().unwrap(); // Put data println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); From c2bbe56cf23d2ea85a181a078d07b45c6b113d6a Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 7 May 2024 15:59:57 +0300 Subject: [PATCH 327/598] Implement Wait and IntoFuture for SHM allocators --- .../api/provider/shared_memory_provider.rs | 132 +++++++++++------- examples/examples/z_alloc_shm.rs | 16 +-- examples/examples/z_bytes_shm.rs | 10 +- examples/examples/z_get_shm.rs | 1 - examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_pub_shm.rs | 3 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_queryable_shm.rs | 1 - io/zenoh-transport/tests/unicast_shm.rs | 16 +-- zenoh/src/api/bytes.rs | 6 +- zenoh/tests/bytes.rs | 2 +- zenoh/tests/shm.rs | 9 +- 12 files changed, 109 insertions(+), 91 deletions(-) diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 658c96e162..811ff9ec57 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -14,12 +14,15 @@ use std::{ collections::VecDeque, + future::{Future, IntoFuture}, marker::PhantomData, + pin::Pin, sync::{atomic::Ordering, Arc, Mutex}, time::Duration, }; use async_trait::async_trait; +use zenoh_core::{Resolvable, Wait}; use zenoh_result::ZResult; use super::{ @@ -108,7 +111,7 @@ where /// Try to build an allocation layout #[zenoh_macros::unstable_doc] - pub fn make_layout(self) -> Result, ZLayoutError> { + pub fn into_layout(self) -> Result, ZLayoutError> { AllocLayout::new(self.0) } @@ -120,16 +123,29 @@ where _phantom: PhantomData, } } +} - /// Get the result - #[zenoh_macros::unstable_doc] - pub fn res(self) -> BufLayoutAllocResult { 
+#[zenoh_macros::unstable_doc] +impl<'a, IDSource, Backend> Resolvable for AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + type To = BufLayoutAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend> Wait for AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + fn wait(self) -> ::To { let builder = AllocBuilder2::<'a, IDSource, Backend, JustAlloc> { data: self.0, _phantom: PhantomData, }; - - builder.res() + builder.wait() } } @@ -262,10 +278,7 @@ pub trait AllocPolicy { #[zenoh_macros::unstable_doc] #[async_trait] pub trait AsyncAllocPolicy { - async fn alloc_async< - IDSource: ProtocolIDSource + Send + Sync, - Backend: SharedMemoryProviderBackend + Sync, - >( + async fn alloc_async( layout: &MemoryLayout, provider: &SharedMemoryProvider, ) -> ChunkAllocResult; @@ -403,13 +416,14 @@ where { _phantom: PhantomData, } + #[async_trait] impl AsyncAllocPolicy for BlockOn where InnerPolicy: AllocPolicy, { async fn alloc_async< - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, >( layout: &MemoryLayout, @@ -529,44 +543,54 @@ where } } -// Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + type To = BufLayoutAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder2<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, Policy: AllocPolicy, { - /// Get the result - #[zenoh_macros::unstable_doc] - pub fn res(self) -> BufLayoutAllocResult { + fn wait(self) -> ::To { let layout = 
AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; layout .alloc() .with_policy::() - .res() + .wait() .map_err(ZLayoutAllocError::Alloc) } } -// Async Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +// Async alloc policy +impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder2<'a, IDSource, Backend, Policy> where - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, Policy: AsyncAllocPolicy, { - /// Get the async result - #[zenoh_macros::unstable_doc] - pub async fn res_async(self) -> BufLayoutAllocResult { - let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; - - layout - .alloc() - .with_policy::() - .res_async() - .await - .map_err(ZLayoutAllocError::Alloc) + type Output = ::To; + type IntoFuture = Pin::To> + 'a>>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin( + async move { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + layout + .alloc() + .with_policy::() + .await + .map_err(ZLayoutAllocError::Alloc) + } + .into_future(), + ) } } @@ -598,36 +622,48 @@ where } } -// Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + type To = BufAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, Policy: AllocPolicy, { - /// Get the result - #[zenoh_macros::unstable_doc] - pub fn res(self) -> BufAllocResult { + fn wait(self) -> ::To { self.layout .provider .alloc_inner::(self.layout.size, &self.layout.provider_layout) } } -// Async Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, 
Backend, Policy> +// Async alloc policy +impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder<'a, IDSource, Backend, Policy> where - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, Policy: AsyncAllocPolicy, { - /// Get the async result - #[zenoh_macros::unstable_doc] - pub async fn res_async(self) -> BufAllocResult { - self.layout - .provider - .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) - .await + type Output = ::To; + type IntoFuture = Pin::To> + 'a>>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin( + async move { + self.layout + .provider + .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) + .await + } + .into_future(), + ) } } @@ -703,7 +739,7 @@ where /// Trait to create ProtocolID sources for SharedMemoryProvider #[zenoh_macros::unstable_doc] -pub trait ProtocolIDSource { +pub trait ProtocolIDSource: Send + Sync { fn id(&self) -> ProtocolID; } @@ -938,7 +974,7 @@ where // PRIVATE impls for Sync backend impl SharedMemoryProvider where - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, { async fn alloc_inner_async( diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index d7d19de7f8..297626fc73 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -42,7 +42,7 @@ async fn run() -> ZResult<()> { // allocation API which is shown later in this example... 
let _direct_allocation = { // OPTION: Simple allocation - let simple = provider.alloc(512).res().unwrap(); + let simple = provider.alloc(512).wait().unwrap(); // OPTION: Allocation with custom alignemnt and alloc policy customization let _comprehensive = provider @@ -50,7 +50,7 @@ async fn run() -> ZResult<()> { .with_alignment(AllocAlignment::new(2)) // for more examples on policies, please see allocation policy usage below (for layout allocation API) .with_policy::() - .res() + .wait() .unwrap(); // OPTION: Allocation with custom alignemnt and async alloc policy @@ -59,7 +59,6 @@ async fn run() -> ZResult<()> { .with_alignment(AllocAlignment::new(2)) // for more examples on policies, please see allocation policy usage below (for layout allocation API) .with_policy::>>() - .res_async() .await .unwrap(); @@ -72,13 +71,13 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION: Simple configuration: - let simple_layout = provider.alloc(512).make_layout().unwrap(); + let simple_layout = provider.alloc(512).into_layout().unwrap(); // OPTION: Comprehensive configuration: let _comprehensive_layout = provider .alloc(512) .with_alignment(AllocAlignment::new(2)) - .make_layout() + .into_layout() .unwrap(); simple_layout @@ -101,13 +100,12 @@ async fn run() -> ZResult<()> { // Some examples on how to use layout's interface: // OPTION: The default allocation with default JustAlloc policy - let default_alloc = buffer_layout.alloc().res().unwrap(); + let default_alloc = buffer_layout.alloc().wait().unwrap(); // OPTION: The async allocation let _async_alloc = buffer_layout .alloc() .with_policy::() - .res_async() .await .unwrap(); @@ -115,14 +113,14 @@ async fn run() -> ZResult<()> { let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() - .res() + .wait() .unwrap(); // OPTION: The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to 
allocate let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() - .res() + .wait() .unwrap(); default_alloc diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 970ff2bae4..60a50ba0d1 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -11,13 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::{ - bytes::ZBytes, - shm::{ - zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, - ZShmMut, POSIX_PROTOCOL_ID, - }, -}; +use zenoh::prelude::*; fn main() { // create an SHM backend... @@ -36,7 +30,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut owned_shm_buf_mut = provider.alloc(1024).res().unwrap(); + let mut owned_shm_buf_mut = provider.alloc(1024).wait().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 19e66b09f8..05d5f6ae7a 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -55,7 +55,6 @@ async fn main() { let mut sbuf = provider .alloc(1024) .with_policy::>() - .res_async() .await .unwrap(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index c53669fc44..2e4d5f86f8 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -61,7 +61,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let buf = provider.alloc(size).res().unwrap(); + let buf = provider.alloc(size).wait().unwrap(); // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index f07341a088..d5a6c56a67 100644 --- 
a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), ZError> { // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = provider.alloc(1024).make_layout().unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { @@ -60,7 +60,6 @@ async fn main() -> Result<(), ZError> { let mut sbuf = layout .alloc() .with_policy::>() - .res_async() .await .unwrap(); diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 47b54b0589..d3e6d50181 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -44,7 +44,7 @@ async fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut buf = provider.alloc(size).res().unwrap(); + let mut buf = provider.alloc(size).wait().unwrap(); for b in buf.as_mut() { *b = rand::random::(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 685b162a5a..80bbafb076 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -75,7 +75,6 @@ async fn main() { let mut sbuf = provider .alloc(1024) .with_policy::>() - .res_async() .await .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 981b856235..1b2369e620 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -241,18 +241,15 @@ mod tests { ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); - let layout = shm01.alloc(MSG_SIZE).make_layout().unwrap(); + let layout = shm01.alloc(MSG_SIZE).into_layout().unwrap(); // Send the message println!("Transport SHM [3a]"); // The 
msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(layout - .alloc() - .with_policy::>() - .res_async()) - .unwrap(); + let mut sbuf = + ztimeout!(layout.alloc().with_policy::>()).unwrap(); sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { @@ -292,11 +289,8 @@ mod tests { // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(layout - .alloc() - .with_policy::>() - .res_async()) - .unwrap(); + let mut sbuf = + ztimeout!(layout.alloc().with_policy::>()).unwrap(); sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 3857019215..4b6a8fc33b 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1831,6 +1831,8 @@ mod tests { use rand::Rng; use zenoh_buffers::{ZBuf, ZSlice}; + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + use zenoh_core::Wait; use zenoh_protocol::core::Properties; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ @@ -1961,10 +1963,10 @@ mod tests { .res(); // Prepare a layout for allocations - let layout = provider.alloc(1024).make_layout().unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); // allocate an SHM buffer - let mutable_shm_buf = layout.alloc().res().unwrap(); + let mutable_shm_buf = layout.alloc().wait().unwrap(); // convert to immutable SHM buffer let immutable_shm_buf: ZShm = mutable_shm_buf.into(); diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 0f26625fba..504406f00c 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -30,7 +30,7 @@ fn shm_bytes_single_buf() { .res(); // Prepare a layout for allocations - let layout = provider.alloc(1024).make_layout().unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); // allocate an SHM buffer 
(ZShmMut) let owned_shm_buf_mut = layout.alloc().res().unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 9c71126138..20fb04e813 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -122,17 +122,14 @@ mod tests { let shm_segment_size = shm01.available(); // Prepare a layout for allocations - let layout = shm01.alloc(size).make_layout().unwrap(); + let layout = shm01.alloc(size).into_layout().unwrap(); // Put data println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); for c in 0..msg_count { // Allocate new message - let sbuf = ztimeout!(layout - .alloc() - .with_policy::>() - .res_async()) - .unwrap(); + let sbuf = + ztimeout!(layout.alloc().with_policy::>()).unwrap(); println!("{c} created"); // Publish this message From 416fb28f45efc88d01ef84fd25295a68f2d6e4c8 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 7 May 2024 16:24:39 +0300 Subject: [PATCH 328/598] fix clippy --- zenoh/tests/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 504406f00c..039c1b1986 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -33,7 +33,7 @@ fn shm_bytes_single_buf() { let layout = provider.alloc(1024).into_layout().unwrap(); // allocate an SHM buffer (ZShmMut) - let owned_shm_buf_mut = layout.alloc().res().unwrap(); + let owned_shm_buf_mut = layout.alloc().wait().unwrap(); // convert into immutable owned buffer (ZShmMut -> ZSlceShm) let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); From 86cf27d5df472545dc351729cb22a6f74a5ded1f Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Wed, 8 May 2024 15:17:17 +0300 Subject: [PATCH 329/598] fix port collision in tests --- zenoh/tests/shm.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 20fb04e813..3a0447fc92 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -176,7 +176,7 @@ mod tests { // Initiate logging 
zenoh_util::try_init_log_from_env(); - let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; + let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await; test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; close_session(peer01, peer02).await; }); @@ -190,7 +190,7 @@ mod tests { zenoh_util::try_init_log_from_env(); let (peer01, peer02) = - open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; + open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await; test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; close_session(peer01, peer02).await; }); From cfb86a81bef94c6867d03d710424f3ec8256abd2 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 13 May 2024 18:21:48 +0200 Subject: [PATCH 330/598] Enhance subscribers, queryables and liveliness tokens propagation to improve scalability (#814) * Router implements interests protocol for clients * Send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients for pico * Fix WireExprExt M flag encoding/decoding * Fix decl_key * Clients send all samples and queries to routers and peers * Avoid self declaration loop on interest * Fix query/replies copy/paste bugs * Peers implement interests protocol for clients * Don't send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients * Add client writer-side filtering (#863) * Add client writer-side filtering * Reimplement liveliness with interests * Fix writer-side filtering before receiving FinalInterest * Fix pubsub interest based routing after router failover * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Implement proper Declare Request/Response id correlation * Add new 
Interest network message * Update doc * Update codec * Fix stable build * Fix test_acl * Fix writer side filtering * Add separate functions to compute matching status * Fix unstable imports * Remove useless checks --------- Co-authored-by: Luca Cominardi --- commons/zenoh-codec/src/network/declare.rs | 6 +- commons/zenoh-codec/src/network/interest.rs | 4 +- commons/zenoh-protocol/src/network/declare.rs | 13 + .../zenoh-protocol/src/network/interest.rs | 2 +- zenoh/src/api/builders/publication.rs | 29 +- zenoh/src/api/publication.rs | 37 +- zenoh/src/api/session.rs | 276 ++++++---- zenoh/src/api/subscriber.rs | 4 +- zenoh/src/net/primitives/demux.rs | 2 +- zenoh/src/net/primitives/mod.rs | 4 + zenoh/src/net/primitives/mux.rs | 50 ++ zenoh/src/net/routing/dispatcher/face.rs | 102 +++- zenoh/src/net/routing/dispatcher/pubsub.rs | 98 +++- zenoh/src/net/routing/dispatcher/queries.rs | 82 +++ zenoh/src/net/routing/dispatcher/resource.rs | 158 ++++-- zenoh/src/net/routing/hat/client/mod.rs | 3 + zenoh/src/net/routing/hat/client/pubsub.rs | 285 ++++++++-- zenoh/src/net/routing/hat/client/queries.rs | 57 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 5 + .../net/routing/hat/linkstate_peer/pubsub.rs | 473 +++++++++++++---- .../net/routing/hat/linkstate_peer/queries.rs | 264 ++++++++-- zenoh/src/net/routing/hat/mod.rs | 60 ++- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 5 + zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 390 +++++++++++--- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 340 +++++++++--- zenoh/src/net/routing/hat/router/mod.rs | 5 + zenoh/src/net/routing/hat/router/pubsub.rs | 488 ++++++++++++++---- zenoh/src/net/routing/hat/router/queries.rs | 271 ++++++++-- zenoh/src/net/runtime/adminspace.rs | 5 + zenoh/src/net/tests/tables.rs | 2 + 30 files changed, 2856 insertions(+), 664 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index faffb04952..7c3b797d5d 100644 --- 
a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -958,7 +958,7 @@ where if x.wire_expr.has_suffix() { flags |= 1; } - if let Mapping::Receiver = wire_expr.mapping { + if let Mapping::Sender = wire_expr.mapping { flags |= 1 << 1; } codec.write(&mut zriter, flags)?; @@ -998,9 +998,9 @@ where String::new() }; let mapping = if imsg::has_flag(flags, 1 << 1) { - Mapping::Receiver - } else { Mapping::Sender + } else { + Mapping::Receiver }; Ok(( diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs index 2deda7748a..5ebdc91f71 100644 --- a/commons/zenoh-codec/src/network/interest.rs +++ b/commons/zenoh-codec/src/network/interest.rs @@ -23,8 +23,8 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare, id, - interest::{self, InterestMode, InterestOptions}, - Interest, Mapping, + interest::{self, Interest, InterestMode, InterestOptions}, + Mapping, }, }; diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index a5373cd5f4..d8c66559ce 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -178,6 +178,19 @@ pub mod common { pub mod ext { use super::*; + /// Flags: + /// - N: Named If N==1 then the key expr has name/suffix + /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |X|X|X|X|X|X|M|N| + /// +-+-+-+---------+ + /// ~ key_scope:z16 ~ + /// +---------------+ + /// ~ key_suffix ~ if N==1 -- + /// +---------------+ + /// pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index 46797b72ee..b36080be28 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ 
b/commons/zenoh-protocol/src/network/interest.rs @@ -121,7 +121,7 @@ pub mod flag { pub type DeclareRequestId = u32; pub type AtomicDeclareRequestId = AtomicU32; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum InterestMode { Final, Current, diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index d4dc1b54d2..0b7bb01eae 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -314,8 +314,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher + id: 0, // This is a one shot Publisher key_expr: self.key_expr?, congestion_control: self.congestion_control, priority: self.priority, @@ -363,22 +362,16 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { } } self.session - .declare_publication_intent(key_expr.clone()) - .wait()?; - #[cfg(feature = "unstable")] - let eid = self.session.runtime.next_id(); - let publisher = Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid, - key_expr, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - }; - tracing::trace!("publish({:?})", publisher.key_expr); - Ok(publisher) + .declare_publisher_inner(key_expr.clone(), self.destination) + .map(|id| Publisher { + session: self.session, + id, + key_expr, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }) } } diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 553170e76a..d72f18739d 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -14,6 +14,7 @@ use std::{ convert::TryFrom, + fmt, future::{IntoFuture, Ready}, pin::Pin, task::{Context, Poll}, @@ -32,9 +33,7 @@ use 
zenoh_result::{Error, ZResult}; use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, crate::api::sample::SourceInfo, - crate::api::Id, zenoh_protocol::core::EntityGlobalId, - zenoh_protocol::core::EntityId, }; use super::{ @@ -48,7 +47,23 @@ use super::{ sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, session::{SessionRef, Undeclarable}, }; -use crate::net::primitives::Primitives; +use crate::{api::Id, net::primitives::Primitives}; + +pub(crate) struct PublisherState { + pub(crate) id: Id, + pub(crate) remote_id: Id, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) destination: Locality, +} + +impl fmt::Debug for PublisherState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Publisher") + .field("id", &self.id) + .field("key_expr", &self.key_expr) + .finish() + } +} #[zenoh_macros::unstable] #[derive(Clone)] @@ -113,8 +128,7 @@ impl std::fmt::Debug for PublisherRef<'_> { #[derive(Debug, Clone)] pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, - #[cfg(feature = "unstable")] - pub(crate) eid: EntityId, + pub(crate) id: Id, pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, @@ -142,7 +156,7 @@ impl<'a> Publisher<'a> { pub fn id(&self) -> EntityGlobalId { EntityGlobalId { zid: self.session.zid(), - eid: self.eid, + eid: self.id, } } @@ -459,11 +473,9 @@ impl Resolvable for PublisherUndeclaration<'_> { impl Wait for PublisherUndeclaration<'_> { fn wait(mut self) -> ::To { let Publisher { - session, key_expr, .. + session, id: eid, .. 
} = &self.publisher; - session - .undeclare_publication_intent(key_expr.clone()) - .wait()?; + session.undeclare_publisher_inner(*eid)?; self.publisher.key_expr = unsafe { keyexpr::from_str_unchecked("") }.into(); Ok(()) } @@ -481,10 +493,7 @@ impl IntoFuture for PublisherUndeclaration<'_> { impl Drop for Publisher<'_> { fn drop(&mut self) { if !self.key_expr.is_empty() { - let _ = self - .session - .undeclare_publication_intent(self.key_expr.clone()) - .wait(); + let _ = self.session.undeclare_publisher_inner(self.id); } } } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 018a3a085e..e5087e693b 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -35,16 +35,19 @@ use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, ExprId, Reliability, WireExpr, ZenohId, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, ZenohId, + EMPTY_EXPR_ID, }, network::{ + self, declare::{ self, common::ext::WireExprType, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, + interest::{InterestMode, InterestOptions}, request::{self, ext::TargetType, Request}, - AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, + AtomicRequestId, Interest, Mapping, Push, RequestId, Response, ResponseFinal, }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, @@ -68,7 +71,7 @@ use super::{ handlers::{Callback, DefaultHandler}, info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, - publication::Priority, + publication::{Priority, PublisherState}, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, 
SampleKind}, @@ -107,7 +110,7 @@ pub(crate) struct SessionState { pub(crate) remote_resources: HashMap, #[cfg(feature = "unstable")] pub(crate) remote_subscribers: HashMap>, - //pub(crate) publications: Vec, + pub(crate) publishers: HashMap, pub(crate) subscribers: HashMap>, pub(crate) queryables: HashMap>, #[cfg(feature = "unstable")] @@ -116,13 +119,13 @@ pub(crate) struct SessionState { pub(crate) matching_listeners: HashMap>, pub(crate) queries: HashMap, pub(crate) aggregated_subscribers: Vec, - //pub(crate) aggregated_publishers: Vec, + pub(crate) aggregated_publishers: Vec, } impl SessionState { pub(crate) fn new( aggregated_subscribers: Vec, - _aggregated_publishers: Vec, + aggregated_publishers: Vec, ) -> SessionState { SessionState { primitives: None, @@ -132,7 +135,7 @@ impl SessionState { remote_resources: HashMap::new(), #[cfg(feature = "unstable")] remote_subscribers: HashMap::new(), - //publications: Vec::new(), + publishers: HashMap::new(), subscribers: HashMap::new(), queryables: HashMap::new(), #[cfg(feature = "unstable")] @@ -141,7 +144,7 @@ impl SessionState { matching_listeners: HashMap::new(), queries: HashMap::new(), aggregated_subscribers, - //aggregated_publishers, + aggregated_publishers, } } } @@ -916,84 +919,99 @@ impl Session { }) } - /// Declare a publication for the given key expression. - /// - /// Puts that match the given key expression will only be sent on the network - /// if matching subscribers exist in the system. 
- /// - /// # Arguments - /// - /// * `key_expr` - The key expression to publish - pub(crate) fn declare_publication_intent<'a>( - &'a self, - _key_expr: KeyExpr<'a>, - ) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - // tracing::trace!("declare_publication({:?})", key_expr); - // let mut state = zwrite!(self.state); - // if !state.publications.iter().any(|p| **p == **key_expr) { - // let declared_pub = if let Some(join_pub) = state - // .aggregated_publishers - // .iter() - // .find(|s| s.includes(&key_expr)) - // { - // let joined_pub = state.publications.iter().any(|p| join_pub.includes(p)); - // (!joined_pub).then(|| join_pub.clone().into()) - // } else { - // Some(key_expr.clone()) - // }; - // state.publications.push(key_expr.into()); - - // if let Some(res) = declared_pub { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // drop(state); - // primitives.decl_publisher(&res.to_wire(self), None); - // } - // } - Ok(()) - }) + pub(crate) fn declare_publisher_inner( + &self, + key_expr: KeyExpr, + destination: Locality, + ) -> ZResult { + let mut state = zwrite!(self.state); + tracing::trace!("declare_publisher({:?})", key_expr); + let id = self.runtime.next_id(); + + let mut pub_state = PublisherState { + id, + remote_id: id, + key_expr: key_expr.clone().into_owned(), + destination, + }; + + let declared_pub = (destination != Locality::SessionLocal) + .then(|| { + match state + .aggregated_publishers + .iter() + .find(|s| s.includes(&key_expr)) + { + Some(join_pub) => { + if let Some(joined_pub) = state.publishers.values().find(|p| { + p.destination != Locality::SessionLocal + && join_pub.includes(&p.key_expr) + }) { + pub_state.remote_id = joined_pub.remote_id; + None + } else { + Some(join_pub.clone().into()) + } + } + None => { + if let Some(twin_pub) = state.publishers.values().find(|p| { + p.destination != Locality::SessionLocal && p.key_expr == key_expr + }) { + pub_state.remote_id = twin_pub.remote_id; + None + } else { + 
Some(key_expr.clone()) + } + } + } + }) + .flatten(); + + state.publishers.insert(id, pub_state); + + if let Some(res) = declared_pub { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_interest(Interest { + id, + mode: InterestMode::CurrentFuture, + options: InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS, + wire_expr: Some(res.to_wire(self).to_owned()), + ext_qos: network::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: network::ext::NodeIdType::DEFAULT, + }); + } + Ok(id) } - /// Undeclare a publication previously declared - /// with [`declare_publication`](Session::declare_publication). - /// - /// # Arguments - /// - /// * `key_expr` - The key expression of the publication to undeclarte - pub(crate) fn undeclare_publication_intent<'a>( - &'a self, - _key_expr: KeyExpr<'a>, - ) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - // let mut state = zwrite!(self.state); - // if let Some(idx) = state.publications.iter().position(|p| **p == *key_expr) { - // trace!("undeclare_publication({:?})", key_expr); - // state.publications.remove(idx); - // match state - // .aggregated_publishers - // .iter() - // .find(|s| s.includes(&key_expr)) - // { - // Some(join_pub) => { - // let joined_pub = state.publications.iter().any(|p| join_pub.includes(p)); - // if !joined_pub { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // let key_expr = WireExpr::from(join_pub).to_owned(); - // drop(state); - // primitives.forget_publisher(&key_expr, None); - // } - // } - // None => { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // drop(state); - // primitives.forget_publisher(&key_expr.to_wire(self), None); - // } - // }; - // } else { - // bail!("Unable to find publication") - // } + pub(crate) fn undeclare_publisher_inner(&self, pid: Id) -> ZResult<()> { + let mut state = zwrite!(self.state); + if let Some(pub_state) = state.publishers.remove(&pid) { + 
trace!("undeclare_publisher({:?})", pub_state); + if pub_state.destination != Locality::SessionLocal { + // Note: there might be several publishers on the same KeyExpr. + // Before calling forget_publishers(key_expr), check if this was the last one. + if !state.publishers.values().any(|p| { + p.destination != Locality::SessionLocal && p.remote_id == pub_state.remote_id + }) { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_interest(Interest { + id: pub_state.remote_id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: declare::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + } + } Ok(()) - }) + } else { + Err(zerror!("Unable to find publisher").into()) + } } pub(crate) fn declare_subscriber_inner( @@ -1005,7 +1023,7 @@ impl Session { info: &SubscriberInfo, ) -> ZResult> { let mut state = zwrite!(self.state); - tracing::trace!("subscribe({:?})", key_expr); + tracing::trace!("declare_subscriber({:?})", key_expr); let id = self.runtime.next_id(); let key_expr = match scope { Some(scope) => scope / key_expr, @@ -1126,15 +1144,34 @@ impl Session { let state = zread!(self.state); self.update_status_up(&state, &key_expr) } + } else { + #[cfg(feature = "unstable")] + if key_expr + .as_str() + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) + { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id, + mode: InterestMode::CurrentFuture, + options: InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS, + wire_expr: Some(key_expr.to_wire(self).to_owned()), + ext_qos: network::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: network::ext::NodeIdType::DEFAULT, + }); + } } Ok(sub_state) } - pub(crate) fn unsubscribe(&self, sid: Id) -> ZResult<()> { + pub(crate) fn undeclare_subscriber_inner(&self, sid: Id) -> ZResult<()> { let mut state = 
zwrite!(self.state); if let Some(sub_state) = state.subscribers.remove(&sid) { - trace!("unsubscribe({:?})", sub_state); + trace!("undeclare_subscriber({:?})", sub_state); for res in state .local_resources .values_mut() @@ -1184,6 +1221,26 @@ impl Session { self.update_status_down(&state, &sub_state.key_expr) } } + } else { + #[cfg(feature = "unstable")] + if sub_state + .key_expr + .as_str() + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) + { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id: sub_state.id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: declare::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + } } Ok(()) } else { @@ -1199,7 +1256,7 @@ impl Session { callback: Callback<'static, Query>, ) -> ZResult> { let mut state = zwrite!(self.state); - tracing::trace!("queryable({:?})", key_expr); + tracing::trace!("declare_queryable({:?})", key_expr); let id = self.runtime.next_id(); let qable_state = Arc::new(QueryableState { id, @@ -1236,7 +1293,7 @@ impl Session { pub(crate) fn close_queryable(&self, qid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(qable_state) = state.queryables.remove(&qid) { - trace!("close_queryable({:?})", qable_state); + trace!("undeclare_queryable({:?})", qable_state); if qable_state.origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); @@ -1358,33 +1415,29 @@ impl Session { key_expr: &KeyExpr, destination: Locality, ) -> ZResult { - use crate::net::routing::dispatcher::tables::RoutingExpr; let router = self.runtime.router(); let tables = zread!(router.tables.tables); - let res = crate::net::routing::dispatcher::resource::Resource::get_resource( - &tables.root_res, - key_expr.as_str(), - ); - let route = crate::net::routing::dispatcher::pubsub::get_local_data_route( - 
&tables, - &res, - &mut RoutingExpr::new(&tables.root_res, key_expr.as_str()), - ); + let matching_subscriptions = + crate::net::routing::dispatcher::pubsub::get_matching_subscriptions(&tables, key_expr); drop(tables); let matching = match destination { - Locality::Any => !route.is_empty(), + Locality::Any => !matching_subscriptions.is_empty(), Locality::Remote => { if let Some(face) = zread!(self.state).primitives.as_ref() { - route.values().any(|dir| !Arc::ptr_eq(&dir.0, &face.state)) + matching_subscriptions + .values() + .any(|dir| !Arc::ptr_eq(dir, &face.state)) } else { - !route.is_empty() + !matching_subscriptions.is_empty() } } Locality::SessionLocal => { if let Some(face) = zread!(self.state).primitives.as_ref() { - route.values().any(|dir| Arc::ptr_eq(&dir.0, &face.state)) + matching_subscriptions + .values() + .any(|dir| Arc::ptr_eq(dir, &face.state)) } else { false } @@ -2070,7 +2123,7 @@ impl Primitives for Session { }; self.handle_data( false, - &m.ext_wire_expr.wire_expr, + &expr.to_wire(self), Some(data_info), ZBuf::default(), #[cfg(feature = "unstable")] @@ -2088,9 +2141,15 @@ impl Primitives for Session { zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { trace!("recv UndeclareQueryable {:?}", m.id); } - DeclareBody::DeclareToken(_) => todo!(), - DeclareBody::UndeclareToken(_) => todo!(), - DeclareBody::DeclareFinal(_) => todo!(), + DeclareBody::DeclareToken(m) => { + trace!("recv DeclareToken {:?}", m.id); + } + DeclareBody::UndeclareToken(m) => { + trace!("recv UndeclareToken {:?}", m.id); + } + DeclareBody::DeclareFinal(_) => { + trace!("recv DeclareFinal {:?}", msg.interest_id); + } } } @@ -2585,6 +2644,11 @@ pub trait SessionDeclarations<'s, 'a> { } impl crate::net::primitives::EPrimitives for Session { + #[inline] + fn send_interest(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_interest(ctx.msg) + } + #[inline] fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn 
Primitives).send_declare(ctx.msg) diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index ba345f5116..a0cfd51811 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -145,7 +145,7 @@ impl Wait for SubscriberUndeclaration<'_> { self.subscriber.alive = false; self.subscriber .session - .unsubscribe(self.subscriber.state.id) + .undeclare_subscriber_inner(self.subscriber.state.id) } } @@ -161,7 +161,7 @@ impl IntoFuture for SubscriberUndeclaration<'_> { impl Drop for SubscriberInner<'_> { fn drop(&mut self) { if self.alive { - let _ = self.session.unsubscribe(self.state.id); + let _ = self.session.undeclare_subscriber_inner(self.state.id); } } } diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index b400d1a254..56bbbe4570 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -68,7 +68,7 @@ impl TransportPeerEventHandler for DeMux { match msg.body { NetworkBody::Push(m) => self.face.send_push(m), NetworkBody::Declare(m) => self.face.send_declare(m), - NetworkBody::Interest(_) => todo!(), + NetworkBody::Interest(m) => self.face.send_interest(m), NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index d3aa8097ca..dbdcdd26f8 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -43,6 +43,8 @@ pub trait Primitives: Send + Sync { pub(crate) trait EPrimitives: Send + Sync { fn as_any(&self) -> &dyn Any; + fn send_interest(&self, ctx: RoutingContext); + fn send_declare(&self, ctx: RoutingContext); fn send_push(&self, msg: Push); @@ -76,6 +78,8 @@ impl Primitives for DummyPrimitives { } impl EPrimitives for DummyPrimitives { + fn send_interest(&self, _ctx: RoutingContext) {} + fn send_declare(&self, _ctx: RoutingContext) {} fn 
send_push(&self, _msg: Push) {} diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index df292b4315..f58b4550d0 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -197,6 +197,31 @@ impl Primitives for Mux { } impl EPrimitives for Mux { + fn send_interest(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Interest(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + outface: ctx.outface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix + .as_ref() + .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } + fn send_declare(&self, ctx: RoutingContext) { let ctx = RoutingContext { msg: NetworkMessage { @@ -497,6 +522,31 @@ impl Primitives for McastMux { } impl EPrimitives for McastMux { + fn send_interest(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Interest(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + outface: ctx.outface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix + .as_ref() + .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } + fn send_declare(&self, ctx: RoutingContext) { let ctx = RoutingContext { msg: NetworkMessage { diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index c5129f76e2..4669433145 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs 
+++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -21,7 +21,12 @@ use std::{ use tokio_util::sync::CancellationToken; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohId}, - network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, Mapping, Push, Request, RequestId, Response, + ResponseFinal, + }, zenoh::RequestBody, }; use zenoh_sync::get_mut_unchecked; @@ -35,10 +40,19 @@ use crate::{ api::key_expr::KeyExpr, net::{ primitives::{McastMux, Mux, Primitives}, - routing::interceptor::{InterceptorTrait, InterceptorsChain}, + routing::{ + interceptor::{InterceptorTrait, InterceptorsChain}, + RoutingContext, + }, }, }; +pub(crate) struct InterestState { + pub(crate) options: InterestOptions, + pub(crate) res: Option>, + pub(crate) finalized: bool, +} + pub struct FaceState { pub(crate) id: usize, pub(crate) zid: ZenohId, @@ -46,6 +60,8 @@ pub struct FaceState { #[cfg(feature = "stats")] pub(crate) stats: Option>, pub(crate) primitives: Arc, + pub(crate) local_interests: HashMap, + pub(crate) remote_key_interests: HashMap>>, pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, pub(crate) next_qid: RequestId, @@ -75,6 +91,8 @@ impl FaceState { #[cfg(feature = "stats")] stats, primitives, + local_interests: HashMap::new(), + remote_key_interests: HashMap::new(), local_mappings: HashMap::new(), remote_mappings: HashMap::new(), next_qid: 0, @@ -191,8 +209,67 @@ impl Face { } impl Primitives for Face { - fn send_interest(&self, _msg: zenoh_protocol::network::Interest) { - todo!() + fn send_interest(&self, msg: zenoh_protocol::network::Interest) { + let ctrl_lock = zlock!(self.tables.ctrl_lock); + if msg.mode != InterestMode::Final { + if msg.options.keyexprs() && msg.mode != InterestMode::Current { + register_expr_interest( + &self.tables, + &mut self.state.clone(), + msg.id, + msg.wire_expr.as_ref(), + ); + } + if 
msg.options.subscribers() { + declare_sub_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + msg.wire_expr.as_ref(), + msg.mode, + msg.options.aggregate(), + ); + } + if msg.options.queryables() { + declare_qabl_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + msg.wire_expr.as_ref(), + msg.mode, + msg.options.aggregate(), + ); + } + if msg.mode != InterestMode::Future { + self.state.primitives.send_declare(RoutingContext::new_out( + Declare { + interest_id: Some(msg.id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }, + self.clone(), + )); + } + } else { + unregister_expr_interest(&self.tables, &mut self.state.clone(), msg.id); + undeclare_sub_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + ); + undeclare_qabl_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + ); + } + drop(ctrl_lock); } fn send_declare(&self, msg: zenoh_protocol::network::Declare) { @@ -246,9 +323,20 @@ impl Primitives for Face { msg.ext_nodeid.node_id, ); } - zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareFinal(_) => todo!(), + zenoh_protocol::network::DeclareBody::DeclareToken(m) => { + tracing::warn!("Received unsupported {m:?}") + } + zenoh_protocol::network::DeclareBody::UndeclareToken(m) => { + tracing::warn!("Received unsupported {m:?}") + } + zenoh_protocol::network::DeclareBody::DeclareFinal(_) => { + if let Some(id) = msg.interest_id { + get_mut_unchecked(&mut self.state.clone()) + .local_interests + .entry(id) + .and_modify(|interest| interest.finalized = true); + } + } } drop(ctrl_lock); } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 
94c6f7b1a6..4e69e45dc3 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -18,6 +18,7 @@ use zenoh_protocol::{ core::{key_expr::keyexpr, WhatAmI, WireExpr}, network::{ declare::{ext, subscriber::ext::SubscriberInfo, SubscriberId}, + interest::{InterestId, InterestMode}, Push, }, zenoh::PushBody, @@ -29,8 +30,90 @@ use super::{ resource::{DataRoutes, Direction, Resource}, tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}, }; +#[zenoh_macros::unstable] +use crate::key_expr::KeyExpr; use crate::net::routing::hat::HatTrait; +pub(crate) fn declare_sub_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, + expr: Option<&WireExpr>, + mode: InterestMode, + aggregate: bool, +) { + if let Some(expr) = expr { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare sub interest {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = if res + .as_ref() + .map(|r| r.context.is_some()) + .unwrap_or(false) + { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_sub_interest( + &mut wtables, + face, + id, + Some(&mut res), + mode, + aggregate, + ); + } + None => tracing::error!( + "{} Declare sub interest {} for unknown 
scope {}!", + face, + id, + expr.scope + ), + } + } else { + let mut wtables = zwrite!(tables.tables); + hat_code.declare_sub_interest(&mut wtables, face, id, None, mode, aggregate); + } +} + +pub(crate) fn undeclare_sub_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + tracing::debug!("{} Undeclare sub interest {}", face, id,); + let mut wtables = zwrite!(tables.tables); + hat_code.undeclare_sub_interest(&mut wtables, face, id); +} + pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, @@ -329,18 +412,11 @@ fn get_data_route( #[zenoh_macros::unstable] #[inline] -pub(crate) fn get_local_data_route( +pub(crate) fn get_matching_subscriptions( tables: &Tables, - res: &Option>, - expr: &mut RoutingExpr, -) -> Arc { - res.as_ref() - .and_then(|res| res.data_route(WhatAmI::Client, 0)) - .unwrap_or_else(|| { - tables - .hat_code - .compute_data_route(tables, expr, 0, WhatAmI::Client) - }) + key_expr: &KeyExpr<'_>, +) -> HashMap> { + tables.hat_code.get_matching_subscriptions(tables, key_expr) } #[cfg(feature = "stats")] diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 2bbc924e0b..23e405c3c4 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -24,6 +24,7 @@ use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ declare::{ext, queryable::ext::QueryableInfoType, QueryableId}, + interest::{InterestId, InterestMode}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, @@ -39,6 +40,87 @@ use super::{ }; use crate::net::routing::{hat::HatTrait, RoutingContext}; +#[allow(clippy::too_many_arguments)] // TODO refactor +pub(crate) fn declare_qabl_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, + expr: 
Option<&WireExpr>, + mode: InterestMode, + aggregate: bool, +) { + if let Some(expr) = expr { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare qabl interest {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = if res + .as_ref() + .map(|r| r.context.is_some()) + .unwrap_or(false) + { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_qabl_interest( + &mut wtables, + face, + id, + Some(&mut res), + mode, + aggregate, + ); + } + None => tracing::error!( + "{} Declare qabl interest {} for unknown scope {}!", + face, + id, + expr.scope + ), + } + } else { + let mut wtables = zwrite!(tables.tables); + hat_code.declare_qabl_interest(&mut wtables, face, id, None, mode, aggregate); + } +} + +pub(crate) fn undeclare_qabl_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + tracing::debug!("{} Undeclare qabl interest {}", face, id,); + let mut wtables = zwrite!(tables.tables); + hat_code.undeclare_qabl_interest(&mut wtables, face, id); +} + pub(crate) struct Query { src_face: Arc, src_qid: RequestId, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index d8765e16ae..e6b13dc2c8 100644 --- 
a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -27,6 +27,7 @@ use zenoh_protocol::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, }, + interest::InterestId, Mapping, RequestId, }, }; @@ -60,6 +61,20 @@ pub(crate) struct SessionContext { pub(crate) e_interceptor_cache: Option>, } +impl SessionContext { + pub(crate) fn new(face: Arc) -> Self { + Self { + face, + local_expr_id: None, + remote_expr_id: None, + subs: None, + qabl: None, + in_interceptor_cache: None, + e_interceptor_cache: None, + } + } +} + #[derive(Default)] pub(crate) struct RoutesIndexes { pub(crate) routers: Vec, @@ -217,6 +232,16 @@ impl Resource { self.context.as_mut().unwrap() } + #[inline(always)] + pub(crate) fn matches(&self, other: &Arc) -> bool { + self.context + .as_ref() + .unwrap() + .matches + .iter() + .any(|m| m.upgrade().is_some_and(|m| &m == other)) + } + pub fn nonwild_prefix(res: &Arc) -> (Option>, String) { match &res.nonwild_prefix { None => (Some(res.clone()), "".to_string()), @@ -434,34 +459,34 @@ impl Resource { let (nonwild_prefix, wildsuffix) = Resource::nonwild_prefix(res); match nonwild_prefix { Some(mut nonwild_prefix) => { - let ctx = get_mut_unchecked(&mut nonwild_prefix) + if let Some(ctx) = get_mut_unchecked(&mut nonwild_prefix) .session_ctxs - .entry(face.id) - .or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - }); - - if let Some(expr_id) = ctx.remote_expr_id { - WireExpr { - scope: expr_id, - suffix: wildsuffix.into(), - mapping: Mapping::Receiver, + .get(&face.id) + { + if let Some(expr_id) = ctx.remote_expr_id { + return WireExpr { + scope: expr_id, + suffix: wildsuffix.into(), + mapping: Mapping::Receiver, + }; } - } else if let Some(expr_id) = ctx.local_expr_id { - WireExpr { - scope: 
expr_id, - suffix: wildsuffix.into(), - mapping: Mapping::Sender, + if let Some(expr_id) = ctx.local_expr_id { + return WireExpr { + scope: expr_id, + suffix: wildsuffix.into(), + mapping: Mapping::Sender, + }; } - } else { + } + if face.remote_key_interests.values().any(|res| { + res.as_ref() + .map(|res| res.matches(&nonwild_prefix)) + .unwrap_or(true) + }) { + let ctx = get_mut_unchecked(&mut nonwild_prefix) + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); let expr_id = face.get_next_local_id(); get_mut_unchecked(ctx).local_expr_id = Some(expr_id); get_mut_unchecked(face) @@ -486,6 +511,8 @@ impl Resource { suffix: wildsuffix.into(), mapping: Mapping::Sender, } + } else { + res.expr().into() } } None => wildsuffix.into(), @@ -650,7 +677,7 @@ impl Resource { } } -pub fn register_expr( +pub(crate) fn register_expr( tables: &TablesLock, face: &mut Arc, expr_id: ExprId, @@ -697,20 +724,12 @@ pub fn register_expr( Resource::match_resource(&wtables, &mut res, matches); (res, wtables) }; - get_mut_unchecked(&mut res) + let ctx = get_mut_unchecked(&mut res) .session_ctxs .entry(face.id) - .or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: Some(expr_id), - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - }); + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + + get_mut_unchecked(ctx).remote_expr_id = Some(expr_id); get_mut_unchecked(face) .remote_mappings @@ -728,7 +747,7 @@ pub fn register_expr( } } -pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: ExprId) { +pub(crate) fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: ExprId) { let wtables = zwrite!(tables.tables); match get_mut_unchecked(face).remote_mappings.remove(&expr_id) { Some(mut res) => Resource::clean(&mut res), @@ -736,3 +755,64 @@ pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: } 
drop(wtables); } + +pub(crate) fn register_expr_interest( + tables: &TablesLock, + face: &mut Arc, + id: InterestId, + expr: Option<&WireExpr>, +) { + if let Some(expr) = expr { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (res, wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + get_mut_unchecked(face) + .remote_key_interests + .insert(id, Some(res)); + drop(wtables); + } + None => tracing::error!( + "Declare keyexpr interest with unknown scope {}!", + expr.scope + ), + } + } else { + let wtables = zwrite!(tables.tables); + get_mut_unchecked(face) + .remote_key_interests + .insert(id, None); + drop(wtables); + } +} + +pub(crate) fn unregister_expr_interest( + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + let wtables = zwrite!(tables.tables); + get_mut_unchecked(face).remote_key_interests.remove(&id); + drop(wtables); +} diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 3b4e7c7103..921dc7554c 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -26,6 +26,7 @@ use std::{ use zenoh_config::WhatAmI; use zenoh_protocol::network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + 
interest::InterestId, Oam, }; use zenoh_result::ZResult; @@ -285,6 +286,7 @@ impl HatContext { struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, @@ -295,6 +297,7 @@ impl HatFace { fn new() -> Self { Self { next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), local_qabls: HashMap::new(), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 3334fbfb14..a87a4e7f1e 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -19,23 +19,30 @@ use std::{ use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode, InterestOptions}, + Interest, }, }; use zenoh_sync::get_mut_unchecked; use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::{FaceState, InterestState}, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::{update_data_routes_from, RoutesIndexes}, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ 
-104,18 +111,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -243,7 +243,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - for src_face in tables + for mut src_face in tables .faces .values() .cloned() @@ -252,10 +252,134 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } + if face.whatami != WhatAmI::Client { + for res in face_hat_mut!(&mut src_face).remote_sub_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; + get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::CurrentFuture, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } } + // recompute routes + update_data_routes_from(tables, &mut tables.root_res.clone()); } impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + 
&self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + _aggregate: bool, + ) { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, res.as_ref().map(|res| (*res).clone())); + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + + fn undeclare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + if let Some(interest) = face_hat_mut!(face).remote_sub_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_sub_interests + .values() + .any(|i| *i == interest) + }) { + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.options.subscribers() && (local_interest.res == interest) + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + local_interest + .res + 
.as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), + )); + get_mut_unchecked(dst_face).local_interests.remove(&id); + } + } + } + } + } + } + fn declare_subscription( &self, tables: &mut Tables, @@ -322,6 +446,51 @@ impl HatPubSubTrait for HatCode { return Arc::new(route); } }; + + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) + }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() @@ -333,15 +502,7 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if context.subs.is_some() - && match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } - { + if context.subs.is_some() && context.face.whatami == WhatAmI::Client { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); (context.face.clone(), key_expr.to_owned(), NodeId::default()) @@ 
-365,4 +526,62 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .map(|intres| intres.includes(key_expr)) + .unwrap_or(false) + }) + .unwrap_or(true) + }) && face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .map(|subres| subres.intersects(key_expr)) + .unwrap_or(false) + }) { + matching_subscriptions.insert(face.id, face.clone()); + } + } + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() && context.face.whatami == WhatAmI::Client { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index c915d788a9..749c03d5f8 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -27,9 +27,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, 
Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -99,6 +102,7 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); + println!("Decled key = {key_expr:?}"); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -127,17 +131,11 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -260,6 +258,27 @@ lazy_static::lazy_static! 
{ } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + _tables: &mut Tables, + _face: &mut Arc, + _id: InterestId, + _res: Option<&mut Arc>, + _mode: InterestMode, + _aggregate: bool, + ) { + // ignore + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + _face: &mut Arc, + _id: InterestId, + ) { + // ignore + } + fn declare_queryable( &self, tables: &mut Tables, @@ -326,6 +345,16 @@ impl HatQueriesTrait for HatCode { return EMPTY_ROUTE.clone(); } }; + + if let Some(face) = tables.faces.values().find(|f| f.whatami != WhatAmI::Client) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: f64::MAX, + }); + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index e76f53a0dd..bb5aec4db1 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -29,6 +29,7 @@ use zenoh_protocol::{ common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, oam::id::OAM_LINKSTATE, Oam, }, @@ -480,8 +481,10 @@ impl HatContext { struct HatFace { link_id: usize, next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>, bool)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -491,8 +494,10 @@ impl HatFace { Self { link_id: 0, next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git 
a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index e5f7da81f7..135f899656 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -20,9 +20,12 @@ use std::{ use petgraph::graph::NodeIndex; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -31,16 +34,19 @@ use super::{ face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, }; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - pubsub::*, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -96,23 +102,59 @@ fn propagate_simple_subscription_to( && !face_hat!(dst_face).local_subs.contains_key(res) && dst_face.whatami == WhatAmI::Client { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - 
Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); + + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + } } } @@ -220,18 +262,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - 
in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -273,6 +308,13 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + #[inline] fn send_forget_sourced_subscription_to_net_childs( tables: &Tables, @@ -313,8 +355,8 @@ fn send_forget_sourced_subscription_to_net_childs( } fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -329,6 +371,35 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, &face) || remote_peer_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -417,8 +488,9 @@ pub(super) fn undeclare_client_subscription( if client_subs.is_empty() { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } + if client_subs.len() == 1 && 
!peer_subs { - let face = &mut client_subs[0]; + let mut face = &mut client_subs[0]; if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( @@ -435,6 +507,35 @@ pub(super) fn undeclare_client_subscription( res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, face) || remote_peer_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -453,32 +554,8 @@ fn forget_client_subscription( } } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).peer_subs { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } +pub(super) fn pubsub_new_face(_tables: &mut Tables, _face: &mut Arc) { 
+ // Nothing to do } pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { @@ -534,40 +611,129 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, -) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } +impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for 
sub in &hat!(tables).peer_subs { + if sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } + } else { + for sub in &hat!(tables).peer_subs { + if sub.context.is_some() + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } } } - } else { - tracing::trace!("Tree for node sid:{} not yet ready", source); + if mode.future() { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.cloned(), aggregate)); + } + } + + fn undeclare_sub_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_sub_interests.remove(&id); } -} -impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, @@ -644,6 +810,43 @@ impl HatPubSubTrait for HatCode { source: NodeId, source_type: WhatAmI, ) -> Arc { + #[inline] + fn insert_faces_for_subs( + 
route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, + ) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = + net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key( + expr.prefix, + expr.suffix, + face.id, + ); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + let mut route = HashMap::new(); let key_expr = expr.full_expr(); if key_expr.ends_with('/') { @@ -688,13 +891,7 @@ impl HatPubSubTrait for HatCode { for (sid, context) in &mres.session_ctxs { if context.subs.is_some() - && match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } + && (source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client) { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); @@ -719,4 +916,72 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + #[inline] + fn insert_faces_for_subs( + route: &mut HashMap>, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, + ) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = 
net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| face.clone()); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + let net = hat!(tables).peers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).peer_subs, + ); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index bed683f717..3d9babbd5d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -28,9 +28,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, ZenohId, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -46,7 +49,7 @@ 
use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, RoutingContext, PREFIX_LIVELINESS, }; @@ -170,6 +173,10 @@ fn propagate_simple_queryable( if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client + && face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true)) { let id = current .map(|c| c.0) @@ -279,17 +286,11 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -331,6 +332,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + #[inline] fn send_forget_sourced_queryable_to_net_childs( tables: &Tables, @@ -371,8 +379,8 @@ fn send_forget_sourced_queryable_to_net_childs( } fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { 
face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -387,6 +395,35 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_qabls(&m, &face) || remote_peer_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -485,7 +522,7 @@ pub(super) fn undeclare_client_queryable( } if client_qabls.len() == 1 && !peer_qabls { - let face = &mut client_qabls[0]; + let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -501,6 +538,35 @@ pub(super) fn undeclare_client_queryable( res.expr(), )); } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_qabls(&m, face) || remote_peer_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -518,33 +584,8 @@ fn forget_client_queryable( } } -pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - if 
face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } +pub(super) fn queries_new_face(_tables: &mut Tables, _face: &mut Arc) { + // Nothing to do } pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { @@ -644,6 +685,134 @@ lazy_static::lazy_static! { } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: 
info, + }), + }, + res.expr(), + )); + } + } else { + for qabl in hat!(tables).peer_qabls.iter() { + if qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } else { + for qabl in hat!(tables).peer_qabls.iter() { + if qabl.context.is_some() + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_qabl_interests + .insert(id, res.cloned()); + } + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_qabl_interests.remove(&id); + } + fn declare_queryable( &self, tables: &mut Tables, @@ -765,10 +934,7 @@ impl 
HatQueriesTrait for HatCode { ); for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { + if source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 5eb812df71..3a7844ea44 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,7 +17,7 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use std::{any::Any, sync::Arc}; +use std::{any::Any, collections::HashMap, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; @@ -28,6 +28,7 @@ use zenoh_protocol::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, SubscriberId, }, + interest::{InterestId, InterestMode}, Oam, }, }; @@ -41,7 +42,7 @@ use super::{ }, router::RoutesIndexes, }; -use crate::net::runtime::Runtime; +use crate::{key_expr::KeyExpr, runtime::Runtime}; mod client; mod linkstate_peer; @@ -135,6 +136,22 @@ pub(crate) trait HatBaseTrait { } pub(crate) trait HatPubSubTrait { + #[allow(clippy::too_many_arguments)] // TODO refactor + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ); + fn undeclare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ); fn declare_subscription( &self, tables: &mut Tables, @@ -164,9 +181,31 @@ pub(crate) trait HatPubSubTrait { ) -> Arc; fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes; + + fn get_matching_subscriptions( 
+ &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap>; } pub(crate) trait HatQueriesTrait { + #[allow(clippy::too_many_arguments)] // TODO refactor + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ); + fn undeclare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ); fn declare_queryable( &self, tables: &mut Tables, @@ -219,3 +258,20 @@ pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box Box::new(router::HatCode {}), } } + +trait CurrentFutureTrait { + fn future(&self) -> bool; + fn current(&self) -> bool; +} + +impl CurrentFutureTrait for InterestMode { + #[inline] + fn future(&self) -> bool { + self == &InterestMode::Future || self == &InterestMode::CurrentFuture + } + + #[inline] + fn current(&self) -> bool { + self == &InterestMode::Current || self == &InterestMode::CurrentFuture + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 530c181335..5ac77a3135 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -28,6 +28,7 @@ use zenoh_protocol::{ common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, oam::id::OAM_LINKSTATE, Oam, }, @@ -357,8 +358,10 @@ impl HatContext { struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>, bool)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -367,8 +370,10 @@ impl HatFace { fn new() -> Self { Self { next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + remote_qabl_interests: HashMap::new(), local_qabls: 
HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index e7cf0c5e5d..31172e2804 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -19,23 +19,29 @@ use std::{ use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -51,23 +57,59 @@ fn propagate_simple_subscription_to( && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); + + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + } } } @@ -104,18 +146,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + 
.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -169,9 +204,16 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -186,6 +228,33 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, &face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -203,8 +272,9 @@ pub(super) fn undeclare_client_subscription( if client_subs.is_empty() { propagate_forget_simple_subscription(tables, res); } + if client_subs.len() == 1 { - let face = &mut client_subs[0]; + let mut face = &mut client_subs[0]; if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( @@ -221,6 +291,33 @@ 
pub(super) fn undeclare_client_subscription( res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -240,22 +337,168 @@ fn forget_client_subscription( } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in face_hat!(src_face).remote_subs.values() { - propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); + if face.whatami != WhatAmI::Client { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in face_hat!(src_face).remote_subs.values() { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + ); + } } } } impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + let sub_info = 
SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_subs + .values() + .any(|sub| sub.context.is_some() && sub.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + if sub.context.is_some() && sub.matches(res) { + let id = if mode.future() { + let id = + face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber( + DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }, + ), + }, + sub.expr(), + )); + } + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + 
face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.cloned(), aggregate)); + } + } + + fn undeclare_sub_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_sub_interests.remove(&id); + } + fn declare_subscription( &self, tables: &mut Tables, @@ -334,13 +577,7 @@ impl HatPubSubTrait for HatCode { for (sid, context) in &mres.session_ctxs { if context.subs.is_some() - && match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } + && (source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client) { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); @@ -365,4 +602,35 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres 
in matches.iter() { + let mres = mres.upgrade().unwrap(); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index f0de12d7b9..1801f66c84 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -27,9 +27,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -41,7 +44,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, RoutingContext, PREFIX_LIVELINESS, }; @@ -77,43 +80,62 @@ fn local_qabl_info( .unwrap_or(QueryableInfoType::DEFAULT) } +#[inline] +fn propagate_simple_queryable_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &Option<&mut Arc>, +) { + let info = local_qabl_info(tables, res, dst_face); + let current = face_hat!(dst_face).local_qabls.get(res); + if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + && (current.is_none() || current.unwrap().1 != info) + && (dst_face.whatami != WhatAmI::Client + || face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true))) + && (src_face.is_none() + || 
src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client) + { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); + face_hat_mut!(dst_face) + .local_qabls + .insert(res.clone(), (id, info)); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } +} + fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, ) { - let faces = tables.faces.values().cloned(); + let faces = tables + .faces + .values() + .cloned() + .collect::>>(); for mut dst_face in faces { - let info = local_qabl_info(tables, res, &dst_face); - let current = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current.is_none() || current.unwrap().1 != info) - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client) - { - let id = current - .map(|c| c.0) - .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); - face_hat_mut!(&mut dst_face) - .local_qabls - .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } + propagate_simple_queryable_to(tables, &mut dst_face, res, &src_face); } } @@ -127,17 +149,11 @@ fn register_client_queryable( // Register 
queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -168,6 +184,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { @@ -185,6 +208,33 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_qabls(&m, face)) + }) { + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -209,7 +259,7 @@ pub(super) fn undeclare_client_queryable( propagate_simple_queryable(tables, res, None); } if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; + let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -225,6 +275,33 @@ pub(super) fn 
undeclare_client_queryable( res.expr(), )); } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && (remote_client_qabls(&m, face))) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -242,15 +319,17 @@ fn forget_client_queryable( } } -pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.values() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); +pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(src_face).remote_qabls.values() { + propagate_simple_queryable_to(tables, face, qabl, &Some(&mut src_face.clone())); + } } } } @@ -260,6 +339,150 @@ lazy_static::lazy_static! 
{ } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_qabls + .values() + .any(|qabl| qabl.context.is_some() && qabl.matches(res)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() && qabl.matches(res) { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = + face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); 
+ } + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_qabl_interests + .insert(id, res.cloned()); + } + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_qabl_interests.remove(&id); + } + fn declare_queryable( &self, tables: &mut Tables, @@ -337,10 +560,7 @@ impl HatQueriesTrait for HatCode { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { + if source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index f573acee43..54b132f665 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ 
b/zenoh/src/net/routing/hat/router/mod.rs @@ -30,6 +30,7 @@ use zenoh_protocol::{ common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, oam::id::OAM_LINKSTATE, Oam, }, @@ -785,8 +786,10 @@ impl HatContext { struct HatFace { link_id: usize, next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>, bool)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -796,8 +799,10 @@ impl HatFace { Self { link_id: 0, next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 14726ac970..3bfb0fdd6f 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -20,9 +20,12 @@ use std::{ use petgraph::graph::NodeIndex; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -31,16 +34,19 @@ use super::{ face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, }; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - pubsub::*, - resource::{NodeId, Resource, 
SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -105,23 +111,59 @@ fn propagate_simple_subscription_to( || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); + + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) 
+ } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + } } } @@ -272,18 +314,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -334,6 +369,13 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + #[inline] fn send_forget_sourced_subscription_to_net_childs( tables: &Tables, @@ -374,8 +416,8 @@ fn send_forget_sourced_subscription_to_net_childs( } fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -390,6 +432,37 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc res.expr(), )); } + for res in 
face_hat!(&mut face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, &face) + || remote_peer_subs(tables, &m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -563,8 +636,9 @@ pub(super) fn undeclare_client_subscription( } else { propagate_forget_simple_subscription_to_peers(tables, res); } + if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; + let mut face = &mut client_subs[0]; if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( @@ -581,6 +655,37 @@ pub(super) fn undeclare_client_subscription( res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, face) + || remote_peer_subs(tables, &m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -604,27 +709,7 @@ 
pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for sub in &hat!(tables).router_subs { if sub.context.is_some() && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) @@ -826,40 +911,135 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - subs: &HashSet, -) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } +impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: 
InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } + } else { + for sub in &hat!(tables).router_subs { + if 
sub.context.is_some() + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } } } - } else { - tracing::trace!("Tree for node sid:{} not yet ready", source); + if mode.future() { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.cloned(), aggregate)); + } + } + + fn undeclare_sub_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_sub_interests.remove(&id); } -} -impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, @@ -973,6 +1153,43 @@ impl HatPubSubTrait for HatCode { source: NodeId, source_type: WhatAmI, ) -> Arc { + #[inline] + fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, + ) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = + net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key( + expr.prefix, + expr.suffix, + face.id, + ); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + 
tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + let mut route = HashMap::new(); let key_expr = expr.full_expr(); if key_expr.ends_with('/') { @@ -1064,4 +1281,91 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + #[inline] + fn insert_faces_for_subs( + route: &mut HashMap>, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, + ) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| face.clone()); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + let master = !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + if master { + let net = hat!(tables).routers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).router_subs, + ); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + let 
net = hat!(tables).peers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).peer_subs, + ); + } + + if master { + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() && context.face.whatami != WhatAmI::Router { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 9defb80081..72e3a781e5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -28,9 +28,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, ZenohId, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -46,7 +49,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, RoutingContext, PREFIX_LIVELINESS, }; @@ -238,6 +241,11 @@ fn propagate_simple_queryable( let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current.is_none() || current.unwrap().1 != info) + && (dst_face.whatami != WhatAmI::Client + || face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true))) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -404,17 +412,11 @@ fn 
register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -465,6 +467,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + #[inline] fn send_forget_sourced_queryable_to_net_childs( tables: &Tables, @@ -505,8 +514,8 @@ fn send_forget_sourced_queryable_to_net_childs( } fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -521,6 +530,37 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_qabls(&m, &face) + || remote_peer_qabls(tables, &m) + || remote_router_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + 
ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -707,7 +747,7 @@ pub(super) fn undeclare_client_queryable( } if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; + let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -723,6 +763,37 @@ pub(super) fn undeclare_client_queryable( res.expr(), )); } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_qabls(&m, face) + || remote_peer_qabls(tables, &m) + || remote_router_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -741,32 +812,7 @@ fn forget_client_queryable( } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - 
}, - qabl.expr(), - )); - } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) @@ -864,7 +910,8 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { + if let Some((id, _)) = face_hat!(dst_face).local_qabls.get(res).cloned() + { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -1021,6 +1068,140 @@ lazy_static::lazy_static! { } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + 
ext_info: info, + }), + }, + res.expr(), + )); + } + } else { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } else { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_qabl_interests + .insert(id, res.cloned()); + } + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + 
face_hat_mut!(face).remote_qabl_interests.remove(&id); + } + fn declare_queryable( &self, tables: &mut Tables, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62f6b7c8b4..12c1f26fdb 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -499,6 +499,11 @@ impl Primitives for AdminSpace { } impl crate::net::primitives::EPrimitives for AdminSpace { + #[inline] + fn send_interest(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_interest(ctx.msg) + } + #[inline] fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_declare(ctx.msg) diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 5f04b73d53..1622c1eb52 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -534,6 +534,8 @@ impl Primitives for ClientPrimitives { } impl EPrimitives for ClientPrimitives { + fn send_interest(&self, _ctx: RoutingContext) {} + fn send_declare(&self, ctx: RoutingContext) { match ctx.msg.body { DeclareBody::DeclareKeyExpr(d) => { From 4e16dc9ae23ee463a19a4740507f7548b37feef1 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 14 May 2024 00:13:55 +0300 Subject: [PATCH 331/598] add forgotten types to prelude --- zenoh/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e48388f5e7..e9da14328f 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -397,6 +397,7 @@ pub mod shm { protocol_id::POSIX_PROTOCOL_ID, }, provider::{ + chunk::{AllocatedChunk, ChunkDescriptor}, shared_memory_provider::{ AllocBuilder, AllocBuilder2, AllocLayout, AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, DeallocOptimal, DeallocYoungest, @@ -405,6 +406,7 @@ pub mod shm { SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, StaticProtocolID, }, + 
shared_memory_provider_backend::SharedMemoryProviderBackend, types::{ AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, From ad6a97440c2c46306bacb8eca7bf2f6fc9a2d60e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 15 May 2024 19:23:41 +0200 Subject: [PATCH 332/598] Reduce open time (#971) * Router implements interests protocol for clients * Send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients for pico * Fix WireExprExt M flag encoding/decoding * Fix decl_key * Clients send all samples and queries to routers and peers * Avoid self declaration loop on interest * Fix query/replies copy/paste bugs * Peers implement interests protocol for clients * Don't send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients * Add client writer-side filtering (#863) * Add client writer-side filtering * Reimplement liveliness with interests * Fix writer-side filtering before receiving FinalInterest * Fix pubsub interest based routing after router failover * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Implement proper Declare Request/Response id correlation * Add new Interest network message * Update doc * Update codec * Fix stable build * Fix test_acl * Fix writer side filtering * Add separate functions to compute matching status * Fix unstable imports * Remove useless checks * Don't apply open session delay in client mode * Add open_delay test * Peers don't apply writer side filtering until FinalInterest is received * Don't wait for full scouting delay when peers connected all configured connect endpoints * Increase scouting delay and decrease api open delay * Wait for gossip and related 
connections attempts before returning to open * Remove random backoff for p2p * Fix memory leak * Remove API_OPEN_DELAY * Don't apply any scouting delay when multicast disabled and no configured connect/endpoints * Sleep for scouting/delay in router and linkstate peer modes --------- Co-authored-by: Luca Cominardi --- commons/zenoh-config/src/defaults.rs | 2 +- zenoh/Cargo.toml | 3 + zenoh/src/api/session.rs | 3 - zenoh/src/net/routing/hat/client/mod.rs | 5 + .../src/net/routing/hat/linkstate_peer/mod.rs | 6 + zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 20 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 42 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 21 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 23 +- zenoh/src/net/routing/hat/router/mod.rs | 6 + zenoh/src/net/runtime/mod.rs | 7 + zenoh/src/net/runtime/orchestrator.rs | 119 ++++- zenoh/tests/open_time.rs | 426 ++++++++++++++++++ 13 files changed, 657 insertions(+), 26 deletions(-) create mode 100644 zenoh/tests/open_time.rs diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 865da7b5ba..138d00bf85 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -36,7 +36,7 @@ pub const mode: WhatAmI = WhatAmI::Peer; #[allow(dead_code)] pub mod scouting { pub const timeout: u64 = 3000; - pub const delay: u64 = 200; + pub const delay: u64 = 500; pub mod multicast { pub const enabled: bool = true; pub const address: ([u8; 4], u16) = ([224, 0, 0, 224], 7446); diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 15996ce620..c07f22fe9f 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -114,6 +114,9 @@ zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } zenoh-task = { workspace = true } +[dev-dependencies] +tokio = { workspace = true } + [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index e5087e693b..afb4a4b0d3 100644 --- 
a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -99,7 +99,6 @@ zconfigurable! { pub(crate) static ref API_QUERY_RECEPTION_CHANNEL_SIZE: usize = 256; pub(crate) static ref API_REPLY_EMISSION_CHANNEL_SIZE: usize = 256; pub(crate) static ref API_REPLY_RECEPTION_CHANNEL_SIZE: usize = 256; - pub(crate) static ref API_OPEN_SESSION_DELAY: u64 = 500; } pub(crate) struct SessionState { @@ -865,8 +864,6 @@ impl Session { .await; session.owns_runtime = true; runtime.start().await?; - // Workaround for the declare_and_shoot problem - tokio::time::sleep(Duration::from_millis(*API_OPEN_SESSION_DELAY)).await; Ok(session) }) } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 921dc7554c..ccedf8d419 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -120,6 +120,11 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index bb5aec4db1..85b65302f0 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -251,6 +251,12 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).remote_qabl_interests.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); 
for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 57b76fc086..79948d0f0b 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -14,7 +14,6 @@ use std::convert::TryInto; use petgraph::graph::NodeIndex; -use rand::Rng; use vec_map::VecMap; use zenoh_buffers::{ writer::{DidntWrite, HasWriter}, @@ -399,6 +398,11 @@ impl Network { if self.gossip { if let Some(idx) = idx { + zenoh_runtime::ZRuntime::Net.block_in_place( + strong_runtime + .start_conditions() + .add_peer_connector_zid(zid), + ); if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { self.send_on_links( vec![( @@ -424,12 +428,11 @@ impl Network { .await .is_none() { - // random backoff - let sleep_time = std::time::Duration::from_millis( - rand::thread_rng().gen_range(0..100), - ); - tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; + runtime + .start_conditions() + .terminate_peer_connector_zid(zid) + .await; } }); } @@ -437,6 +440,11 @@ impl Network { } } } + zenoh_runtime::ZRuntime::Net.block_in_place( + strong_runtime + .start_conditions() + .terminate_peer_connector_zid(src), + ); } pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 5ac77a3135..5485213c3c 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -27,10 +27,14 @@ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, network::{ - declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - interest::InterestId, + declare::{ + ext::{NodeIdType, QoSType}, + queryable::ext::QueryableInfoType, + QueryableId, SubscriberId, 
+ }, + interest::{InterestId, InterestOptions}, oam::id::OAM_LINKSTATE, - Oam, + Declare, DeclareBody, DeclareFinal, Oam, }, }; use zenoh_result::ZResult; @@ -53,8 +57,9 @@ use crate::net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::{ - dispatcher::face::Face, + dispatcher::face::{Face, InterestState}, router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + RoutingContext, }, runtime::Runtime, }; @@ -157,14 +162,43 @@ impl HatBaseTrait for HatCode { net.add_link(transport.clone()); } } + if face.state.whatami == WhatAmI::Peer { + get_mut_unchecked(&mut face.state).local_interests.insert( + 0, + InterestState { + options: InterestOptions::ALL, + res: None, + finalized: false, + }, + ); + } + pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); + + if face.state.whatami == WhatAmI::Peer { + face.state + .primitives + .send_declare(RoutingContext::new(Declare { + interest_id: Some(0), + ext_qos: QoSType::default(), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } Ok(()) } fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).remote_qabl_interests.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 31172e2804..69cb1619b7 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -39,7 +39,7 @@ use crate::{ tables::{Route, RoutingExpr, Tables}, }, hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, - router::RoutesIndexes, + 
router::{update_data_routes_from, RoutesIndexes}, RoutingContext, PREFIX_LIVELINESS, }, }; @@ -358,6 +358,10 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { } } } + // recompute routes + // TODO: disable data routes and recompute them in parallel to avoid holding + // tables write lock for a long time on peer conenction. + update_data_routes_from(tables, &mut tables.root_res.clone()); } impl HatPubSubTrait for HatCode { @@ -565,6 +569,21 @@ impl HatPubSubTrait for HatCode { return Arc::new(route); } }; + + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), NodeId::default()) + }); + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 1801f66c84..e986cfa16e 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -45,7 +45,7 @@ use crate::net::routing::{ tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, - router::RoutesIndexes, + router::{update_query_routes_from, RoutesIndexes}, RoutingContext, PREFIX_LIVELINESS, }; @@ -332,6 +332,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { } } } + // recompute routes + // TODO: disable query routes and recompute them in parallel to avoid holding + // tables write lock for a long time on peer conenction. + update_query_routes_from(tables, &mut tables.root_res.clone()); } lazy_static::lazy_static! 
{ @@ -549,6 +553,23 @@ impl HatQueriesTrait for HatCode { return EMPTY_ROUTE.clone(); } }; + + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: 0.5, + }); + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 54b132f665..40fbc2d588 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -424,6 +424,12 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).remote_qabl_interests.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index f4eb0289ca..515f3f54be 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -54,6 +54,7 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use self::orchestrator::StartConditions; use super::{primitives::DeMux, routing, routing::router::Router}; #[cfg(all(feature = "unstable", feature = "plugins"))] use crate::api::loader::{load_plugins, start_plugins}; @@ -78,6 +79,7 @@ pub(crate) struct RuntimeState { task_controller: TaskController, #[cfg(all(feature = "unstable", feature = "plugins"))] plugins_manager: 
Mutex, + start_conditions: Arc, } pub struct WeakRuntime { @@ -186,6 +188,7 @@ impl RuntimeBuilder { task_controller: TaskController::default(), #[cfg(all(feature = "unstable", feature = "plugins"))] plugins_manager: Mutex::new(plugins_manager), + start_conditions: Arc::new(StartConditions::default()), }), }; *handler.runtime.write().unwrap() = Runtime::downgrade(&runtime); @@ -354,6 +357,10 @@ impl Runtime { pub fn get_cancellation_token(&self) -> CancellationToken { self.state.task_controller.get_cancellation_token() } + + pub(crate) fn start_conditions(&self) -> &Arc { + &self.state.start_conditions + } } struct RuntimeTransportEventHandler { diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 610f189b58..6a6263d6b0 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -18,7 +18,10 @@ use std::{ use futures::prelude::*; use socket2::{Domain, Socket, Type}; -use tokio::net::UdpSocket; +use tokio::{ + net::UdpSocket, + sync::{futures::Notified, Mutex, Notify}, +}; use zenoh_buffers::{ reader::{DidntRead, HasReader}, writer::HasWriter, @@ -48,6 +51,72 @@ pub enum Loop { Break, } +#[derive(Default, Debug)] +pub(crate) struct PeerConnector { + zid: Option, + terminated: bool, +} + +#[derive(Default, Debug)] +pub(crate) struct StartConditions { + notify: Notify, + peer_connectors: Mutex>, +} + +impl StartConditions { + pub(crate) fn notified(&self) -> Notified<'_> { + self.notify.notified() + } + + pub(crate) async fn add_peer_connector(&self) -> usize { + let mut peer_connectors = self.peer_connectors.lock().await; + peer_connectors.push(PeerConnector::default()); + peer_connectors.len() - 1 + } + + pub(crate) async fn add_peer_connector_zid(&self, zid: ZenohId) { + let mut peer_connectors = self.peer_connectors.lock().await; + if !peer_connectors.iter().any(|pc| pc.zid == Some(zid)) { + peer_connectors.push(PeerConnector { + zid: Some(zid), + terminated: false, + }) + } + } + + 
pub(crate) async fn set_peer_connector_zid(&self, idx: usize, zid: ZenohId) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.get_mut(idx) { + peer_connector.zid = Some(zid); + } + } + + pub(crate) async fn terminate_peer_connector(&self, idx: usize) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.get_mut(idx) { + peer_connector.terminated = true; + } + if !peer_connectors.iter().any(|pc| !pc.terminated) { + self.notify.notify_one() + } + } + + pub(crate) async fn terminate_peer_connector_zid(&self, zid: ZenohId) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.iter_mut().find(|pc| pc.zid == Some(zid)) { + peer_connector.terminated = true; + } else { + peer_connectors.push(PeerConnector { + zid: Some(zid), + terminated: true, + }) + } + if !peer_connectors.iter().any(|pc| !pc.terminated) { + self.notify.notify_one() + } + } +} + impl Runtime { pub async fn start(&mut self) -> ZResult<()> { match self.whatami() { @@ -96,7 +165,7 @@ impl Runtime { } async fn start_peer(&self) -> ZResult<()> { - let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { + let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay, linkstate) = { let guard = &self.state.config.lock(); let listeners = if guard.listen().endpoints().is_empty() { let endpoint: EndPoint = PEER_DEFAULT_LISTENER.parse().unwrap(); @@ -125,6 +194,7 @@ impl Runtime { unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), Duration::from_millis(unwrap_or_default!(guard.scouting().delay())), + unwrap_or_default!(guard.routing().peer().mode()) == *"linkstate", ) }; @@ -135,12 +205,22 @@ impl Runtime { if scouting { self.start_scout(listen, autoconnect, addr, ifaces).await?; } - tokio::time::sleep(delay).await; + + if linkstate 
{ + tokio::time::sleep(delay).await; + } else if (scouting || !peers.is_empty()) + && tokio::time::timeout(delay, self.state.start_conditions.notified()) + .await + .is_err() + && !peers.is_empty() + { + tracing::warn!("Scouting delay elapsed before start conditions are met."); + } Ok(()) } async fn start_router(&self) -> ZResult<()> { - let (listeners, peers, scouting, listen, autoconnect, addr, ifaces) = { + let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { let guard = self.state.config.lock(); let listeners = if guard.listen().endpoints().is_empty() { let endpoint: EndPoint = ROUTER_DEFAULT_LISTENER.parse().unwrap(); @@ -168,6 +248,7 @@ impl Runtime { *unwrap_or_default!(guard.scouting().multicast().autoconnect().router()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), + Duration::from_millis(unwrap_or_default!(guard.scouting().delay())), ) }; @@ -179,6 +260,7 @@ impl Runtime { self.start_scout(listen, autoconnect, addr, ifaces).await?; } + tokio::time::sleep(delay).await; Ok(()) } @@ -277,7 +359,7 @@ impl Runtime { } } else { // try to connect with retry waiting - self.peer_connector_retry(endpoint).await; + let _ = self.peer_connector_retry(endpoint).await; return Ok(()); } } @@ -309,7 +391,7 @@ impl Runtime { } } else if retry_config.exit_on_failure { // try to connect with retry waiting - self.peer_connector_retry(endpoint).await; + let _ = self.peer_connector_retry(endpoint).await; } else { // try to connect in background self.spawn_peer_connector(endpoint).await? @@ -656,14 +738,31 @@ impl Runtime { .await? 
{ let this = self.clone(); - self.spawn(async move { this.peer_connector_retry(peer).await }); + let idx = self.state.start_conditions.add_peer_connector().await; + let config = this.config().lock(); + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + drop(config); + self.spawn(async move { + if let Ok(zid) = this.peer_connector_retry(peer).await { + this.state + .start_conditions + .set_peer_connector_zid(idx, zid) + .await; + } + if !gossip { + this.state + .start_conditions + .terminate_peer_connector(idx) + .await; + } + }); Ok(()) } else { bail!("Forbidden multicast endpoint in connect list!") } } - async fn peer_connector_retry(&self, peer: EndPoint) { + async fn peer_connector_retry(&self, peer: EndPoint) -> ZResult { let retry_config = self.get_connect_retry_config(&peer); let mut period = retry_config.period(); let cancellation_token = self.get_cancellation_token(); @@ -683,7 +782,7 @@ impl Runtime { *zwrite!(orch_transport.endpoint) = Some(peer); } } - break; + return transport.get_zid(); } Ok(Err(e)) => { tracing::debug!( @@ -703,7 +802,7 @@ impl Runtime { } } } - _ = cancellation_token.cancelled() => { break; } + _ = cancellation_token.cancelled() => { bail!(zerror!("Peer connector terminated")); } } tokio::time::sleep(period.next_duration()).await; } diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs new file mode 100644 index 0000000000..87c080bc97 --- /dev/null +++ b/zenoh/tests/open_time.rs @@ -0,0 +1,426 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + future::IntoFuture, + time::{Duration, Instant}, +}; + +use zenoh_config::Config; +use zenoh_link::EndPoint; +use zenoh_protocol::core::WhatAmI; + +const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); +const SLEEP: Duration = Duration::from_millis(100); + +macro_rules! ztimeout_expected { + ($f:expr) => { + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() + }; +} + +async fn time_open( + listen_endpoint: &EndPoint, + connect_endpoint: &EndPoint, + connect_mode: WhatAmI, + lowlatency: bool, +) { + /* [ROUTER] */ + let mut router_config = Config::default(); + router_config.set_mode(Some(WhatAmI::Router)).unwrap(); + router_config + .listen + .set_endpoints(vec![listen_endpoint.clone()]) + .unwrap(); + router_config + .transport + .unicast + .set_lowlatency(lowlatency) + .unwrap(); + router_config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + + let start = Instant::now(); + let router = ztimeout_expected!(zenoh::open(router_config).into_future()).unwrap(); + println!( + "open(mode:{}, listen_endpoint:{}, lowlatency:{}): {:#?}", + WhatAmI::Router, + listen_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [APP] */ + let mut app_config = Config::default(); + app_config.set_mode(Some(connect_mode)).unwrap(); + app_config + .connect + .set_endpoints(vec![connect_endpoint.clone()]) + .unwrap(); + app_config + .transport + .unicast + .set_lowlatency(lowlatency) + .unwrap(); + app_config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + + /* [1] */ + // Open a transport from the app to the router + let start = Instant::now(); + let app = ztimeout_expected!(zenoh::open(app_config).into_future()).unwrap(); + println!( + "open(mode:{}, connect_endpoint:{}, lowlatency:{}): {:#?}", + connect_mode, + connect_endpoint.as_str().split('#').next().unwrap(), + 
lowlatency, + start.elapsed() + ); + + /* [2] */ + // Close the open transport on the app + let start = Instant::now(); + ztimeout_expected!(app.close().into_future()).unwrap(); + println!( + "close(mode:{}, connect_endpoint:{}, lowlatency:{}): {:#?}", + connect_mode, + connect_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [3] */ + // Close the router + let start = Instant::now(); + ztimeout_expected!(router.close().into_future()).unwrap(); + println!( + "close(mode:{}, listen_endpoint:{}, lowlatency:{}): {:#?}", + WhatAmI::Router, + listen_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn time_universal_open(endpoint: &EndPoint, mode: WhatAmI) { + time_open(endpoint, endpoint, mode, false).await +} + +async fn time_lowlatency_open(endpoint: &EndPoint, mode: WhatAmI) { + time_open(endpoint, endpoint, mode, true).await +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_with_lowlatency_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14100).parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14010).parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = 
"transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_with_lowlatency_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14110).parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +// #[cfg(feature = "transport_ws")] +// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[ignore] +// async fn time_ws_only_open() { +// zenoh_util::try_init_log_from_env(); +// let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14020).parse().unwrap(); +// time_universal_open(&endpoint, WhatAmI::Client).await; +// } + +// #[cfg(feature = "transport_ws")] +// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[ignore] +// async fn time_ws_only_with_lowlatency_open() { +// zenoh_util::try_init_log_from_env(); +// let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14120).parse().unwrap(); +// time_lowlatency_open(&endpoint, WhatAmI::Client).await; +// } + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_open".parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_with_lowlatency_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_open" + .parse() + .unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unix_only_open() { + zenoh_util::try_init_log_from_env(); + let f1 = "zenoh-test-unix-socket-9-open.sock"; + let _ = 
std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} + +#[cfg(feature = "transport_tls")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tls_only_open() { + use zenoh_link::tls::config::*; + + zenoh_util::try_init_log_from_env(); + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT 
+Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi 
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + let mut endpoint: EndPoint = format!("tls/localhost:{}", 14030).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_quic")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_quic_only_open() { + use zenoh_link::quic::config::*; + + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. 
The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw 
+MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ 
+e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 14040).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_vsock_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:18000".parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} From fdb86be02184e2d5c4c0661628ffedc5052d9d7b Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 16 May 2024 14:22:36 +0200 Subject: [PATCH 333/598] Fix clippy warning --- zenoh/src/api/encoding.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 29c65f837e..a46b59b6c4 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -723,6 +723,7 @@ impl fmt::Display for Encoding { } } +#[allow(dead_code)] // - Encoding trait pub trait EncodingMapping { const ENCODING: Encoding; From 1015a503c5fc9fe05a2a64620c3f8efb244aef2f Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 17 May 2024 12:19:45 +0200 Subject: [PATCH 334/598] Recompute routes on DeclareFinal to activate writer side filtering --- zenoh/src/net/routing/dispatcher/face.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/zenoh/src/net/routing/dispatcher/face.rs 
b/zenoh/src/net/routing/dispatcher/face.rs index c8cf5d8770..6eb9ee5b90 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -335,6 +335,14 @@ impl Primitives for Face { .local_interests .entry(id) .and_modify(|interest| interest.finalized = true); + + // recompute routes + // TODO: disable routes and recompute them in parallel to avoid holding + // tables write lock for a long time. + let mut wtables = zwrite!(self.tables.tables); + let mut root_res = wtables.root_res.clone(); + update_data_routes_from(&mut wtables, &mut root_res); + update_query_routes_from(&mut wtables, &mut root_res); } } } From df2bc5ad89c2492eea8517652a9e37e7656797d1 Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Wed, 22 May 2024 11:48:20 +0300 Subject: [PATCH 335/598] Fixed stabby ( https://github.com/rust-lang/rust/issues/123281 ) (#1036) * Fixed stabby ( https://github.com/rust-lang/rust/issues/123281 ) * temp: use fixed stabby from branch --------- Co-authored-by: OlivierHecart --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 2ec98a4761..a25aff85d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -149,7 +149,7 @@ serde_cbor = "0.11.2" serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" -stabby = "4.0.5" +stabby = { git="https://github.com/ZettaScaleLabs/stabby.git", branch="fix_stabby_abi_build" } sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" From 127c29fe2bdea1465cb72eb8637fde3f710dfce2 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Wed, 22 May 2024 14:49:27 +0200 Subject: [PATCH 336/598] fix: add missing builders to public API --- zenoh/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 26d87e57e3..4f9d5bd71d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -279,8 +279,8 @@ pub mod publication { pub use 
crate::api::publication::PublisherRef; pub use crate::api::{ builders::publication::{ - PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, - PublisherDeleteBuilder, + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + PublisherDeleteBuilder, PublisherPutBuilder, }, publication::{Priority, Publisher, PublisherUndeclaration}, }; From b3848704ad418cad5961de08cd0626e975dfe813 Mon Sep 17 00:00:00 2001 From: Darius Maitia Date: Thu, 23 May 2024 09:50:20 +0200 Subject: [PATCH 337/598] issue(encoding): exposing api::encoding internal values through `mod internals` to allow bindings to access them. --- zenoh/src/api/encoding.rs | 16 ++++++++++++++++ zenoh/src/lib.rs | 2 ++ 2 files changed, 18 insertions(+) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index a46b59b6c4..7a76b98aca 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -836,6 +836,22 @@ impl EncodingMapping for serde_pickle::Value { const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT; } +pub trait EncodingInternals { + fn id(&self) -> u16; + + fn schema(&self) -> Option<&ZSlice>; +} + +impl EncodingInternals for Encoding { + fn id(&self) -> u16 { + self.0.id + } + + fn schema(&self) -> Option<&ZSlice> { + self.0.schema.as_ref() + } +} + // - Zenoh SHM #[cfg(feature = "shared-memory")] impl EncodingMapping for ZShm { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 26d87e57e3..0323938b0d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -376,6 +376,8 @@ pub mod internal { pub use zenoh_util::{ core::ResolveFuture, zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR, }; + + pub use crate::api::encoding::EncodingInternals; } #[cfg(all(feature = "unstable", feature = "shared-memory"))] From 6c494060dd470c6645319817696bc6d363d0b038 Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 23 May 2024 13:24:16 +0300 Subject: 
[PATCH 338/598] Fix stabby compilation (#1045) * Fixed stabby ( https://github.com/rust-lang/rust/issues/123281 ) * temp: use fixed stabby from branch * fixed stabby package --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b16bfde62..f792677886 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3733,9 +3733,9 @@ dependencies = [ [[package]] name = "stabby" -version = "4.0.5" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ec04c5825384722310b6a1fd83023bee0bfdc838f7aa3069f0a59e10203836b" +checksum = "c7708f5b0e8bddba162d20fa10c8d17c31a2ec6bba369f7904bb18a8bde49ba2" dependencies = [ "lazy_static", "rustversion", @@ -3744,9 +3744,9 @@ dependencies = [ [[package]] name = "stabby-abi" -version = "4.0.5" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976322da1deb6cc64a8406fd24378b840b1962acaac1978a993131c3838d81b3" +checksum = "1a6e7a8b2ff2c116bfab6afcce0adec14509eb38fd3f231bb97826d01de4021e" dependencies = [ "libc", "rustversion", @@ -3756,9 +3756,9 @@ dependencies = [ [[package]] name = "stabby-macros" -version = "4.0.5" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736712a13ab37b1fa6e073831efca751bbcb31033af4d7308bd5d9d605939183" +checksum = "db97bd3101fab9929a08fa0138d30d46c7a80b9d32bc8a3a00706ba00358a275" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -4436,7 +4436,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index a25aff85d7..935eacb328 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -149,7 +149,7 @@ serde_cbor = "0.11.2" serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" 
-stabby = { git="https://github.com/ZettaScaleLabs/stabby.git", branch="fix_stabby_abi_build" } +stabby = "5.0.1" sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" From 959fb6e6e2ecdf8a28874e70a8f7e8602254e52b Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 23 May 2024 14:48:45 +0200 Subject: [PATCH 339/598] Fix Interest Declare replies behavior --- zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs | 2 +- zenoh/src/net/routing/hat/linkstate_peer/queries.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 2 +- zenoh/src/net/routing/hat/router/pubsub.rs | 2 +- zenoh/src/net/routing/hat/router/queries.rs | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 135f899656..67b04661c6 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -622,7 +622,7 @@ impl HatPubSubTrait for HatCode { aggregate: bool, ) { if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = mode.future().then_some(id); + let interest_id = (!mode.future()).then_some(id); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 3d9babbd5d..9c3d502e5f 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -695,7 +695,7 @@ impl HatQueriesTrait for HatCode { aggregate: bool, ) { if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = mode.future().then_some(id); + let interest_id = (!mode.future()).then_some(id); if let Some(res) = res.as_ref() { if aggregate { if hat!(tables).peer_qabls.iter().any(|qabl| { 
diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 69cb1619b7..e46ff3ff16 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -375,7 +375,7 @@ impl HatPubSubTrait for HatCode { aggregate: bool, ) { if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = mode.future().then_some(id); + let interest_id = (!mode.future()).then_some(id); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index e986cfa16e..caa5f79694 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -353,7 +353,7 @@ impl HatQueriesTrait for HatCode { aggregate: bool, ) { if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = mode.future().then_some(id); + let interest_id = (!mode.future()).then_some(id); if let Some(res) = res.as_ref() { if aggregate { if tables.faces.values().any(|src_face| { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 3bfb0fdd6f..2af567d989 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -922,7 +922,7 @@ impl HatPubSubTrait for HatCode { aggregate: bool, ) { if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = mode.future().then_some(id); + let interest_id = (!mode.future()).then_some(id); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 72e3a781e5..9a2beeb001 100644 --- 
a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -1078,7 +1078,7 @@ impl HatQueriesTrait for HatCode { aggregate: bool, ) { if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = mode.future().then_some(id); + let interest_id = (!mode.future()).then_some(id); if let Some(res) = res.as_ref() { if aggregate { if hat!(tables).router_qabls.iter().any(|qabl| { From b06c58cd931c24a504a010e8d5cd63c7bacca90e Mon Sep 17 00:00:00 2001 From: Darius Maitia Date: Fri, 24 May 2024 12:45:50 +0200 Subject: [PATCH 340/598] issue(encoding): adding `new` function to EncodingInternals --- zenoh/src/api/encoding.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 7a76b98aca..f63c339ba3 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -840,6 +840,8 @@ pub trait EncodingInternals { fn id(&self) -> u16; fn schema(&self) -> Option<&ZSlice>; + + fn new(id: u16, schema: Option) -> Self; } impl EncodingInternals for Encoding { @@ -850,6 +852,10 @@ impl EncodingInternals for Encoding { fn schema(&self) -> Option<&ZSlice> { self.0.schema.as_ref() } + + fn new(id: u16, schema: Option) -> Self { + Encoding(zenoh_protocol::core::Encoding { id, schema }) + } } // - Zenoh SHM From 6657ff1d5606981a2e9857d4f85948dd92f5e5c3 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 27 May 2024 23:06:55 +0200 Subject: [PATCH 341/598] fix: replace `Query::reply_err` value parameter by payload to be consistent with other methods --- zenoh/src/api/queryable.rs | 6 +++--- zenoh/tests/session.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 0653c4433d..863184d718 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -193,13 +193,13 @@ impl Query { /// Sends a error reply to this Query. 
/// #[inline(always)] - pub fn reply_err(&self, value: IntoValue) -> ReplyErrBuilder<'_> + pub fn reply_err(&self, payload: IntoZBytes) -> ReplyErrBuilder<'_> where - IntoValue: Into, + IntoZBytes: Into, { ReplyErrBuilder { query: self, - value: value.into(), + value: Value::new(payload, Encoding::default()), } } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 4d0205f5be..1dfca3072e 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -166,7 +166,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re }); } "err" => { - let rep = Value::from(vec![0u8; size]); + let rep = vec![0u8; size]; tokio::task::block_in_place(|| { tokio::runtime::Handle::current() .block_on(async { ztimeout!(query.reply_err(rep)).unwrap() }) From 1e45e19af54f59236139e9114839b4a1299a3a97 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 28 May 2024 14:33:17 +0200 Subject: [PATCH 342/598] chore: Remove `config.json5` file (#1048) This file doesn't seem to have been intentionally added. It dates from merge commit 56746c103358381b4273caf86055e4151d27437f and not from a feature/bugfix commit. 
--- config.json5 | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 config.json5 diff --git a/config.json5 b/config.json5 deleted file mode 100644 index e782fde9bf..0000000000 --- a/config.json5 +++ /dev/null @@ -1,24 +0,0 @@ -{ - plugins: { - rest: { - http_port: 8000 - }, - storage_manager: { - volumes: { - fs: { - __path__: [ "../zenoh-backend-filesystem/target/debug/libzenoh_backend_fs.dylib" ] - } - }, - storages: { - demo: { - "key_expr":"demo/example/**", - "volume": { - "dir":"example", - "id":"fs" - }, - "strip_prefix":"demo/example" - } - } - } - } -} \ No newline at end of file From 546435e57682846a711cfc8462e6902505b69643 Mon Sep 17 00:00:00 2001 From: oteffahi <70609372+oteffahi@users.noreply.github.com> Date: Wed, 29 May 2024 12:25:01 +0200 Subject: [PATCH 343/598] Disable generation of multilink keys if not used (#1042) * Disable generation of multilink keys if not used This change required making the AuthPubKeyFsm an option, and checking it at every step of the MultilinkFsm * Fix linting * Revert "Disable generation of multilink keys if not used" This reverts commits 322f71039415503b690194676fbda24a3e3cd9e2 and b1bb94a6d510463556d5849dfabdbea126115741. * Disable multilink extension when max_links=1 * Chore: fix code format * Revert "Disable multilink extension when max_links=1" Reverting back to b1bb94a6d510463556d5849dfabdbea126115741, disabling only key-generation instead of disabling the multilink extension. 
--- .../unicast/establishment/ext/multilink.rs | 113 ++++++++---------- io/zenoh-transport/src/unicast/manager.rs | 2 +- 2 files changed, 51 insertions(+), 64 deletions(-) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index 8980766888..51c4170755 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -33,21 +33,25 @@ const KEY_SIZE: usize = 512; // Extension Fsm pub(crate) struct MultiLink { - pubkey: RwLock, + pubkey: Option>, } impl MultiLink { - pub(crate) fn make(rng: &mut R) -> ZResult + pub(crate) fn make(rng: &mut R, is_multilink: bool) -> ZResult where R: Rng + CryptoRng, { - let pri_key = RsaPrivateKey::new(rng, KEY_SIZE)?; - let pub_key = RsaPublicKey::from(&pri_key); - let mut auth = AuthPubKey::new(pub_key.into(), pri_key.into()); - auth.disable_lookup(); - Ok(Self { - pubkey: RwLock::new(auth), - }) + if is_multilink { + let pri_key = RsaPrivateKey::new(rng, KEY_SIZE)?; + let pub_key = RsaPublicKey::from(&pri_key); + let mut auth = AuthPubKey::new(pub_key.into(), pri_key.into()); + auth.disable_lookup(); + Ok(Self { + pubkey: Some(RwLock::new(auth)), + }) + } else { + Ok(Self { pubkey: None }) + } } pub(crate) fn open(&self, is_multilink: bool) -> StateOpen { @@ -70,13 +74,16 @@ impl MultiLink { pub(crate) fn fsm<'a>(&'a self, prng: &'a Mutex) -> MultiLinkFsm<'a> { MultiLinkFsm { - fsm: AuthPubKeyFsm::new(&self.pubkey, prng), + fsm: self + .pubkey + .is_some() + .then(|| AuthPubKeyFsm::new(self.pubkey.as_ref().unwrap(), prng)), } } } pub(crate) struct MultiLinkFsm<'a> { - fsm: AuthPubKeyFsm<'a>, + fsm: Option>, } /*************************************/ @@ -102,16 +109,12 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendInitSynIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let 
(pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_init_syn(&pubkey.0) - .await? - .map(|x| x.transmute()); + let r = fsm.send_init_syn(&pubkey.0).await?.map(|x| x.transmute()); Ok(r) } @@ -124,9 +127,9 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { const S: &str = "MultiLink extension - Recv InitAck."; let (state, mut ext) = input; - let mut pubkey = match state.pubkey.take() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (mut pubkey, fsm) = match (state.pubkey.take(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; match ext.take() { @@ -137,8 +140,7 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { .read(&mut reader) .map_err(|_| zerror!("{S} Decoding error."))?; - self.fsm - .recv_init_ack((&mut pubkey.0, Some(ext.transmute()))) + fsm.recv_init_ack((&mut pubkey.0, Some(ext.transmute()))) .await?; state.pubkey = Some((pubkey.0, init_ack.bob_pubkey)); @@ -156,16 +158,12 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendOpenSynIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_open_syn(&pubkey.0) - .await? 
- .map(|x| x.transmute()); + let r = fsm.send_open_syn(&pubkey.0).await?.map(|x| x.transmute()); Ok(r) } @@ -176,15 +174,14 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { input: Self::RecvOpenAckIn, ) -> Result { let (state, mut ext) = input; - let pubkey = match state.pubkey.as_mut() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (pubkey, fsm) = match (state.pubkey.as_mut(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; match ext.take() { Some(ext) => { - self.fsm - .recv_open_ack((&mut pubkey.0, Some(ext.transmute()))) + fsm.recv_open_ack((&mut pubkey.0, Some(ext.transmute()))) .await?; } None => { @@ -280,9 +277,9 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { const S: &str = "MultiLink extension - Recv InitSyn."; let (state, mut ext) = input; - let mut pubkey = match state.pubkey.take() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (mut pubkey, fsm) = match (state.pubkey.take(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; match ext.take() { @@ -293,8 +290,7 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { .read(&mut reader) .map_err(|_| zerror!("{S} Decoding error."))?; - self.fsm - .recv_init_syn((&mut pubkey.0, Some(ext.transmute()))) + fsm.recv_init_syn((&mut pubkey.0, Some(ext.transmute()))) .await?; state.pubkey = Some((pubkey.0, init_syn.alice_pubkey)); @@ -313,16 +309,12 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendInitAckIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_init_ack(&pubkey.0) - .await? 
- .map(|x| x.transmute()); + let r = fsm.send_init_ack(&pubkey.0).await?.map(|x| x.transmute()); Ok(r) } @@ -333,13 +325,12 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { input: Self::RecvOpenSynIn, ) -> Result { let (state, ext) = input; - let pubkey = match state.pubkey.as_mut() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (pubkey, fsm) = match (state.pubkey.as_mut(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; - self.fsm - .recv_open_syn((&mut pubkey.0, ext.map(|x| x.transmute()))) + fsm.recv_open_syn((&mut pubkey.0, ext.map(|x| x.transmute()))) .await } @@ -349,16 +340,12 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendOpenAckIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_open_ack(&pubkey.0) - .await? 
- .map(|x| x.transmute()); + let r = fsm.send_open_ack(&pubkey.0).await?.map(|x| x.transmute()); Ok(r) } } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index b92462276a..f42002b0d3 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -243,7 +243,7 @@ impl TransportManagerBuilderUnicast { protocols: Arc::new(AsyncMutex::new(HashMap::new())), transports: Arc::new(AsyncMutex::new(HashMap::new())), #[cfg(feature = "transport_multilink")] - multilink: Arc::new(MultiLink::make(prng)?), + multilink: Arc::new(MultiLink::make(prng, config.max_links > 1)?), #[cfg(feature = "transport_auth")] authenticator: Arc::new(self.authenticator), #[cfg(feature = "shared-memory")] From 7fb6a12fdcf7377fede20e4885d0b648f02f584e Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 30 May 2024 11:17:01 +0300 Subject: [PATCH 344/598] Fix async SHM API (#1063) --- .../zenoh-shm/src/api/provider/shared_memory_provider.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 811ff9ec57..9c0c497044 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -277,7 +277,7 @@ pub trait AllocPolicy { /// Trait for async allocation policies #[zenoh_macros::unstable_doc] #[async_trait] -pub trait AsyncAllocPolicy { +pub trait AsyncAllocPolicy: Send { async fn alloc_async( layout: &MemoryLayout, provider: &SharedMemoryProvider, @@ -420,7 +420,7 @@ where #[async_trait] impl AsyncAllocPolicy for BlockOn where - InnerPolicy: AllocPolicy, + InnerPolicy: AllocPolicy + Send, { async fn alloc_async< IDSource: ProtocolIDSource, @@ -577,7 +577,7 @@ where Policy: AsyncAllocPolicy, { type Output = ::To; - type IntoFuture = 
Pin::To> + 'a>>; + type IntoFuture = Pin::Output> + 'a + Send>>; fn into_future(self) -> Self::IntoFuture { Box::pin( @@ -652,7 +652,7 @@ where Policy: AsyncAllocPolicy, { type Output = ::To; - type IntoFuture = Pin::To> + 'a>>; + type IntoFuture = Pin::To> + 'a + Send>>; fn into_future(self) -> Self::IntoFuture { Box::pin( From 1cb33c058e9b4092665b5b061709b80030b31991 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 30 May 2024 13:50:16 +0200 Subject: [PATCH 345/598] refactor: keep only trait in prelude (#1007) * refactor: keep only traits in payload * fix: fix doctests * fix: fix ci tests --- Cargo.lock | 2 + .../src/pub_sub/bin/z_pub_sub.rs | 2 +- .../src/queryable_get/bin/z_queryable_get.rs | 4 +- examples/examples/z_alloc_shm.rs | 9 +- examples/examples/z_bytes_shm.rs | 9 +- examples/examples/z_delete.rs | 2 +- examples/examples/z_formats.rs | 9 +- examples/examples/z_forward.rs | 2 +- examples/examples/z_get.rs | 2 +- examples/examples/z_get_liveliness.rs | 2 +- examples/examples/z_get_shm.rs | 11 +- examples/examples/z_info.rs | 4 +- examples/examples/z_liveliness.rs | 2 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_ping_shm.rs | 9 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_posix_shm_provider.rs | 5 +- examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_shm.rs | 10 +- examples/examples/z_pub_shm_thr.rs | 8 +- examples/examples/z_pub_thr.rs | 6 +- examples/examples/z_pull.rs | 2 +- examples/examples/z_put.rs | 2 +- examples/examples/z_put_float.rs | 2 +- examples/examples/z_queryable.rs | 2 +- examples/examples/z_queryable_shm.rs | 10 +- examples/examples/z_scout.rs | 2 +- examples/examples/z_storage.rs | 7 +- examples/examples/z_sub.rs | 2 +- examples/examples/z_sub_liveliness.rs | 2 +- examples/examples/z_sub_shm.rs | 2 +- examples/examples/z_sub_thr.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/Cargo.toml | 3 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- 
plugins/zenoh-plugin-rest/src/lib.rs | 5 +- .../zenoh-plugin-storage-manager/Cargo.toml | 1 + .../zenoh-plugin-storage-manager/src/lib.rs | 6 +- .../src/replica/align_queryable.rs | 5 +- .../src/replica/aligner.rs | 11 +- .../src/replica/mod.rs | 3 + .../tests/operations.rs | 5 +- .../tests/wildcard.rs | 5 +- zenoh-ext/examples/examples/z_member.rs | 2 +- zenoh-ext/examples/examples/z_pub_cache.rs | 2 +- zenoh-ext/examples/examples/z_query_sub.rs | 2 +- zenoh-ext/src/group.rs | 6 +- zenoh-ext/src/querying_subscriber.rs | 10 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 15 +- zenoh/src/api/builders.rs | 2 +- .../builders/{publication.rs => publisher.rs} | 12 +- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/info.rs | 14 +- zenoh/src/api/key_expr.rs | 4 +- zenoh/src/api/liveliness.rs | 34 +- zenoh/src/api/mod.rs | 2 +- .../src/api/{publication.rs => publisher.rs} | 42 +-- zenoh/src/api/query.rs | 12 +- zenoh/src/api/queryable.rs | 22 +- zenoh/src/api/sample.rs | 2 +- zenoh/src/api/scouting.rs | 37 +- zenoh/src/api/session.rs | 64 ++-- zenoh/src/api/subscriber.rs | 22 +- zenoh/src/lib.rs | 48 +-- zenoh/src/prelude.rs | 47 +-- zenoh/tests/acl.rs | 9 +- zenoh/tests/attachments.rs | 6 +- zenoh/tests/bytes.rs | 12 +- zenoh/tests/connection_retry.rs | 8 +- zenoh/tests/events.rs | 4 +- zenoh/tests/formatters.rs | 10 +- zenoh/tests/handler.rs | 2 +- zenoh/tests/interceptors.rs | 8 +- zenoh/tests/liveliness.rs | 14 +- zenoh/tests/matching.rs | 28 +- zenoh/tests/qos.rs | 6 +- zenoh/tests/routing.rs | 4 +- zenoh/tests/session.rs | 7 +- zenoh/tests/shm.rs | 350 +++++++++--------- zenoh/tests/unicity.rs | 10 +- 82 files changed, 595 insertions(+), 485 deletions(-) rename zenoh/src/api/builders/{publication.rs => publisher.rs} (96%) rename zenoh/src/api/{publication.rs => publisher.rs} (95%) diff --git a/Cargo.lock b/Cargo.lock index f792677886..aff6c4950a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5579,6 +5579,7 
@@ dependencies = [ "zenoh", "zenoh-plugin-trait", "zenoh-result", + "zenoh-util", ] [[package]] @@ -5604,6 +5605,7 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", + "zenoh-util", "zenoh_backend_traits", ] diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index 7cd3d9c9f8..f3b1dd0efe 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -13,7 +13,7 @@ // use std::time::Duration; -use zenoh::{config::Config, prelude::*}; +use zenoh::{config::Config, key_expr::KeyExpr, prelude::*}; #[tokio::main] async fn main() { diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 36ef2d07de..06d1d79152 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -13,7 +13,9 @@ // use std::{convert::TryFrom, time::Duration}; -use zenoh::{config::Config, prelude::*}; +use zenoh::{ + config::Config, key_expr::KeyExpr, prelude::*, query::QueryTarget, selector::Selector, +}; #[tokio::main] async fn main() { diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 297626fc73..abdbb2e443 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -11,7 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::*; +use zenoh::{ + prelude::*, + shm::{ + AllocAlignment, BlockOn, Deallocate, Defragment, GarbageCollect, + PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, + }, + Config, +}; #[tokio::main] async fn main() { diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 60a50ba0d1..66d47193ae 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -11,7 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use 
zenoh::prelude::*; +use zenoh::{ + bytes::ZBytes, + prelude::*, + shm::{ + zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, + ZShmMut, POSIX_PROTOCOL_ID, + }, +}; fn main() { // create an SHM backend... diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 4fbb46367c..294d1b850a 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index aeadc8d55d..7c3d3988d3 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,8 +12,7 @@ // ZettaScale Zenoh Team, // -use zenoh::prelude as zenoh; - +use zenoh::key_expr::keyexpr; zenoh::kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" @@ -26,8 +25,8 @@ fn main() { let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing - let settings_ke = zenoh::keyexpr::new("user_id/30/settings/dark_mode").unwrap(); + let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); let parsed = settings_format::parse(settings_ke).unwrap(); - assert_eq!(parsed.user_id(), zenoh::keyexpr::new("30").unwrap()); - assert_eq!(parsed.setting(), zenoh::keyexpr::new("dark_mode").ok()); + assert_eq!(parsed.user_id(), keyexpr::new("30").unwrap()); + assert_eq!(parsed.setting(), keyexpr::new("dark_mode").ok()); } diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 22a6ef4229..deb82a2a7f 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, 
Config}; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 2b5ba011f6..e04fc8bcf6 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{prelude::*, query::QueryTarget, selector::Selector, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 6a616bfa2d..150308aea4 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 05d5f6ae7a..bfb9213ab5 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -14,7 +14,16 @@ use std::time::Duration; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{ + prelude::*, + query::QueryTarget, + selector::Selector, + shm::{ + zshm, BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, + SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, + }, + Config, +}; use zenoh_examples::CommonArgs; const N: usize = 10; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index db28970897..281532e236 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{config::ZenohId, prelude::*}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -43,7 +43,7 @@ struct Args { common: CommonArgs, } -fn parse_args() -> Config { +fn parse_args() -> zenoh::Config { let args = Args::parse(); args.common.into() } diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs 
index cee7a29376..71b1fe4e4e 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index ad761bddd2..56ba47b7f5 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,7 +14,7 @@ use std::time::{Duration, Instant}; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{bytes::ZBytes, key_expr::keyexpr, prelude::*, publisher::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 2e4d5f86f8..a88a9f59a6 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -14,7 +14,14 @@ use std::time::{Duration, Instant}; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{ + buffers::ZSlice, + key_expr::keyexpr, + prelude::*, + publisher::CongestionControl, + shm::{PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID}, + Config, +}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 7d7b60b6e9..ecf2aa1643 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::keyexpr, prelude::*, publisher::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs index cdf502bc61..d89d419846 100644 --- a/examples/examples/z_posix_shm_provider.rs +++ b/examples/examples/z_posix_shm_provider.rs @@ -11,7 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::*; +use zenoh::shm::{ + 
AllocAlignment, MemoryLayout, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, + POSIX_PROTOCOL_ID, +}; fn main() { // Construct an SHM backend diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 0a2e4e09c1..6812246cfa 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index d5a6c56a67..93ce1df553 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,7 +12,15 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{ + key_expr::KeyExpr, + prelude::*, + shm::{ + BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, + POSIX_PROTOCOL_ID, + }, + Config, +}; use zenoh_examples::CommonArgs; const N: usize = 10; diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index d3e6d50181..2d52668ac9 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,7 +12,13 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{ + buffers::ZSlice, + prelude::*, + publisher::CongestionControl, + shm::{PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID}, + Config, +}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index d047d63203..8ea7226c8a 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -15,7 +15,11 @@ use std::convert::TryInto; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{ + bytes::ZBytes, + prelude::*, + publisher::{CongestionControl, Priority}, +}; use zenoh_examples::CommonArgs; fn main() { diff --git 
a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 1e13cefb2f..3127e76c14 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{handlers::RingChannel, key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 5d68d205f9..f56fbf2c8c 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 97e4abd69d..234579b8d5 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index dcdca82c09..eb950766ab 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 80bbafb076..5cc8e301d3 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -12,7 +12,15 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{ + key_expr::KeyExpr, + prelude::*, + shm::{ + zshm, BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, + SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, + }, + Config, +}; use 
zenoh_examples::CommonArgs; const N: usize = 10; diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index bcd65ffb0e..f099beae46 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::*; +use zenoh::{config::WhatAmI, scout, Config}; #[tokio::main] async fn main() { diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 46ccfc8193..86d73da2bb 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -17,7 +17,12 @@ use std::collections::HashMap; use clap::Parser; use futures::select; -use zenoh::prelude::*; +use zenoh::{ + key_expr::{keyexpr, KeyExpr}, + prelude::*, + sample::{Sample, SampleKind}, + Config, +}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 156968eb36..95cd5f8988 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index af2c02342d..86420381e1 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::*; +use zenoh::{key_expr::KeyExpr, prelude::*, sample::SampleKind, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index bab31d4a2a..04ba8e9753 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{config::Config, prelude::*}; +use zenoh::{config::Config, key_expr::KeyExpr, prelude::*, shm::zshm}; 
use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 1006fdb434..bee15ada2b 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -14,7 +14,7 @@ use std::time::Instant; use clap::Parser; -use zenoh::prelude::*; +use zenoh::{prelude::*, Config}; use zenoh_examples::CommonArgs; struct Stats { diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index fe01231447..913ce35bbf 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -29,7 +29,7 @@ //! ``` //! use std::sync::Arc; //! use async_trait::async_trait; -//! use zenoh::prelude::*; +//! use zenoh::{key_expr::OwnedKeyExpr, prelude::*, time::Timestamp, value::Value}; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; //! diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index c84105eb5f..632b19a6f5 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -41,7 +41,7 @@ futures = { workspace = true } git-version = { workspace = true } http-types = { workspace = true } lazy_static = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } schemars = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } @@ -49,6 +49,7 @@ tide = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } +zenoh-util = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c1c8f69ce7..3823554bea 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,8 +15,8 @@ use 
std::time::Duration; use clap::{arg, Command}; use zenoh::{ - config::Config, core::try_init_log_from_env, key_expr::keyexpr, publication::CongestionControl, - sample::QoSBuilderTrait, session::SessionDeclarations, + config::Config, key_expr::keyexpr, publisher::CongestionControl, sample::QoSBuilderTrait, + session::SessionDeclarations, }; const HTML: &str = r#" @@ -35,7 +35,7 @@ if(typeof(EventSource) !== "undefined") { #[async_std::main] async fn main() { // initiate logging - try_init_log_from_env(); + zenoh_util::try_init_log_from_env(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 049ce49354..9ca97e385e 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -27,7 +27,6 @@ use serde::{Deserialize, Serialize}; use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; use zenoh::{ bytes::{StringOrBase64, ZBytes}, - core::try_init_log_from_env, encoding::Encoding, key_expr::{keyexpr, KeyExpr}, plugins::{RunningPluginTrait, ZenohPlugin}, @@ -222,7 +221,7 @@ impl Plugin for RestPlugin { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - try_init_log_from_env(); + zenoh_util::try_init_log_from_env(); tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); @@ -466,7 +465,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- try_init_log_from_env(); + zenoh_util::try_init_log_from_env(); let zid = runtime.zid().to_string(); let session = zenoh::session::init(runtime).await.unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 25393358e7..e328e16948 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -47,6 +47,7 @@ serde_json = { workspace = true } urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-plugin-trait = { workspace = true } +zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } [build-dependencies] diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index f2241a237c..26cd58a093 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,7 +30,7 @@ use flume::Sender; use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; use zenoh::{ - core::{try_init_log_from_env, Result as ZResult}, + core::Result as ZResult, internal::{zlock, LibLoader}, key_expr::keyexpr, plugins::{RunningPluginTrait, ZenohPlugin}, @@ -68,7 +68,7 @@ impl Plugin for StoragesPlugin { type Instance = zenoh::plugins::RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { - try_init_log_from_env(); + zenoh_util::try_init_log_from_env(); tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; @@ -101,7 +101,7 @@ impl StorageRuntimeInner { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- try_init_log_from_env(); + zenoh_util::try_init_log_from_env(); let PluginConfig { name, backend_search_dirs, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 50c93fe3dd..66233d2535 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -19,7 +19,10 @@ use std::{ }; use async_std::sync::Arc; -use zenoh::prelude::*; +use zenoh::{ + bytes::StringOrBase64, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Selector, + time::Timestamp, value::Value, Session, +}; use super::{digest::*, Snapshotter}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index e0301f1a4e..eaecee5246 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -19,7 +19,16 @@ use std::{ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; -use zenoh::prelude::*; +use zenoh::{ + bytes::StringOrBase64, + key_expr::{KeyExpr, OwnedKeyExpr}, + prelude::*, + sample::{Sample, SampleBuilder}, + selector::Selector, + time::Timestamp, + value::Value, + Session, +}; use super::{Digest, EraType, LogEntry, Snapshotter, CONTENTS, ERA, INTERVALS, SUBINTERVALS}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 421d45ade6..97bf86e764 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -44,6 +44,9 @@ pub use aligner::Aligner; pub use digest::{Digest, DigestConfig, EraType, LogEntry}; pub use snapshotter::Snapshotter; pub use storage::{ReplicationService, StorageService}; +use zenoh::{ + bytes::StringOrBase64, key_expr::OwnedKeyExpr, sample::Locality, 
time::Timestamp, Session, +}; const ERA: &str = "era"; const INTERVALS: &str = "intervals"; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index dd20c71936..78d9cc3b24 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -19,7 +19,10 @@ use std::{str::FromStr, thread::sleep}; use async_std::task; -use zenoh::{internal::zasync_executor_init, prelude::*}; +use zenoh::{ + bytes::StringOrBase64, internal::zasync_executor_init, prelude::*, query::Reply, + sample::Sample, time::Timestamp, Config, Session, +}; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 8bafeb9bbe..969db36c4f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -20,7 +20,10 @@ use std::{str::FromStr, thread::sleep}; // use std::collections::HashMap; use async_std::task; -use zenoh::{internal::zasync_executor_init, prelude::*}; +use zenoh::{ + bytes::StringOrBase64, internal::zasync_executor_init, prelude::*, query::Reply, + sample::Sample, time::Timestamp, Config, Session, +}; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 783ee97a9e..5ddd6e3141 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -14,7 +14,7 @@ use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use zenoh::prelude::*; +use zenoh::Config; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs 
b/zenoh-ext/examples/examples/z_pub_cache.rs index 684cc7cb75..56de7b2fbc 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -16,7 +16,7 @@ use std::time::Duration; use clap::{arg, Parser}; use zenoh::{ config::{Config, ModeDependentValue}, - prelude::*, + key_expr::KeyExpr, }; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index 2fa077eba1..513ac3ca58 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::{arg, Parser}; -use zenoh::{config::Config, prelude::*}; +use zenoh::{config::Config, prelude::*, query::ReplyKeyExpr}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 44600b038c..3eb807a638 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -27,8 +27,12 @@ use futures::{prelude::*, select}; use serde::{Deserialize, Serialize}; use tokio::sync::Mutex; use zenoh::{ + bytes::ZBytesReader, internal::{bail, Condition, TaskController}, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::*, + publisher::{Priority, Publisher}, + Session, }; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; @@ -289,7 +293,7 @@ async fn net_event_handler(z: Arc, state: Arc) { ); let qres = format!("{}/{}/{}", GROUP_PREFIX, &state.gid, kae.mid); // @TODO: we could also send this member info - let qc = ConsolidationMode::None; + let qc = zenoh::query::ConsolidationMode::None; tracing::trace!("Issuing Query for {}", &qres); let receiver = z.get(&qres).consolidation(qc).await.unwrap(); diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e76c6f7f5c..4bcaca0565 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -605,7 +605,7 @@ where /// use 
zenoh::prelude::*; /// use zenoh_ext::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -740,11 +740,10 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) @@ -756,7 +755,6 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// // perform an additional fetch /// subscriber /// .fetch( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) @@ -820,11 +818,10 @@ impl Drop for RepliesHandler { /// # use zenoh::prelude::*; /// # use zenoh_ext::*; /// # -/// # let session = zenoh::open(config::peer()).await.unwrap(); +/// # let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// # let mut fetching_subscriber = session /// # .declare_subscriber("key/expr") /// # .fetching( |cb| { -/// # use zenoh::prelude::sync::SyncResolve; /// # session /// # .get("key/expr") /// # .callback(cb) @@ -835,7 +832,6 @@ impl Drop for RepliesHandler { /// # /// fetching_subscriber /// .fetch( |cb| { -/// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 3b33bc9b16..2b9cda7cb0 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -67,7 +67,7 @@ impl<'s> SessionExt<'s, 'static> for Arc { /// use zenoh::config::ModeDependentValue::Unique; /// use zenoh_ext::SessionExt; /// - /// let mut config = config::default(); + /// let mut config = 
zenoh::config::default(); /// config.timestamping.set_enabled(Some(Unique(true))); /// let session = zenoh::open(config).await.unwrap().into_arc(); /// let publication_cache = session.declare_publication_cache("key/expression").await.unwrap(); diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 81c969a223..3a52c04170 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -63,11 +63,10 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) @@ -108,7 +107,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .querying() @@ -142,11 +141,10 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) @@ -199,7 +197,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = 
session /// .declare_subscriber("key/expr") /// .querying() @@ -253,12 +251,11 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .liveliness() /// .get("key/expr") @@ -313,7 +310,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") diff --git a/zenoh/src/api/builders.rs b/zenoh/src/api/builders.rs index 94dbda2dd0..5327dabe90 100644 --- a/zenoh/src/api/builders.rs +++ b/zenoh/src/api/builders.rs @@ -12,5 +12,5 @@ // ZettaScale Zenoh Team, // -pub(crate) mod publication; +pub(crate) mod publisher; pub(crate) mod sample; diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publisher.rs similarity index 96% rename from zenoh/src/api/builders/publication.rs rename to zenoh/src/api/builders/publisher.rs index 0b7bb01eae..950cd946b3 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -27,7 +27,7 @@ use crate::api::{ bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - publication::{Priority, Publisher}, + publisher::{Priority, Publisher}, sample::{Locality, SampleKind}, session::SessionRef, value::Value, @@ -53,15 +53,15 @@ pub struct PublicationBuilderPut { pub struct PublicationBuilderDelete; /// A builder for initializing [`Session::put`](crate::session::Session::put), [`Session::delete`](crate::session::Session::delete), -/// 
[`Publisher::put`](crate::publication::Publisher::put), and [`Publisher::delete`](crate::publication::Publisher::delete) operations. +/// [`Publisher::put`](crate::publisher::Publisher::put), and [`Publisher::delete`](crate::publisher::Publisher::delete) operations. /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; +/// use zenoh::{encoding::Encoding, prelude::*, publisher::CongestionControl}; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session /// .put("key/expression", "payload") /// .encoding(Encoding::TEXT_PLAIN) @@ -238,9 +238,9 @@ impl IntoFuture for PublicationBuilder, PublicationBuil /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; +/// use zenoh::{prelude::*, publisher::CongestionControl}; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session /// .declare_publisher("key/expression") /// .congestion_control(CongestionControl::Block) diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 56ae8c6c1b..e80253a074 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -21,7 +21,7 @@ use crate::api::{ bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - publication::Priority, + publisher::Priority, sample::{QoS, QoSBuilder, Sample, SampleKind}, value::Value, }; diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index f63c339ba3..db5c28ed98 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -37,7 +37,7 @@ use super::bytes::ZBytes; /// /// Create an [`Encoding`] from a string and viceversa. 
/// ``` -/// use zenoh::prelude::Encoding; +/// use zenoh::encoding::Encoding; /// /// let encoding: Encoding = "text/plain".into(); /// let text: String = encoding.clone().into(); @@ -49,7 +49,7 @@ use super::bytes::ZBytes; /// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use /// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain its string representation. /// ``` -/// use zenoh::prelude::Encoding; +/// use zenoh::encoding::Encoding; /// use std::borrow::Cow; /// /// // This allocates @@ -64,7 +64,7 @@ use super::bytes::ZBytes; /// The convetions is to use the `;` separator if an encoding is created from a string. /// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a schme to one of the associated constants. /// ``` -/// use zenoh::prelude::Encoding; +/// use zenoh::encoding::Encoding; /// /// let encoding1 = Encoding::from("text/plain;utf-8"); /// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8"); diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 0c75252a78..6409760a72 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -29,7 +29,7 @@ use super::session::SessionRef; /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let zid = session.info().zid().await; /// # } /// ``` @@ -68,7 +68,7 @@ impl<'a> IntoFuture for ZenohIdBuilder<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } @@ -117,7 +117,7 @@ impl<'a> IntoFuture for RoutersZenohIdBuilder<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = 
zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let zid = session.info().zid().await; /// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} @@ -167,7 +167,7 @@ impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let info = session.info(); /// let zid = info.zid().await; /// # } @@ -185,7 +185,7 @@ impl SessionInfo<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let zid = session.info().zid().await; /// # } /// ``` @@ -204,7 +204,7 @@ impl SessionInfo<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } @@ -223,7 +223,7 @@ impl SessionInfo<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index c5fdf12609..8215fe5278 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -205,7 +205,7 @@ impl<'a> KeyExpr<'a> { /// This is notably useful for workspaces: /// ```rust /// # use std::convert::TryFrom; - /// # use zenoh::prelude::KeyExpr; + /// # use zenoh::key_expr::KeyExpr; /// # let get_workspace = || 
KeyExpr::try_from("some/workspace").unwrap(); /// let workspace: KeyExpr = get_workspace(); /// let topic = workspace.join("some/topic").unwrap(); @@ -566,7 +566,7 @@ impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); /// session.undeclare(key_expr).await.unwrap(); /// # } diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 640c639dec..5011b99a7e 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -63,7 +63,7 @@ lazy_static::lazy_static!( /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -90,7 +90,7 @@ impl<'a> Liveliness<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -123,9 +123,9 @@ impl<'a> Liveliness<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{prelude::*, sample::SampleKind}; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session.liveliness().declare_subscriber("key/expression").await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { /// match sample.kind() { @@ -163,7 +163,7 @@ impl<'a> Liveliness<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session 
= zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let replies = session.liveliness().get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// if let Ok(sample) = reply.result() { @@ -203,7 +203,7 @@ impl<'a> Liveliness<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -278,7 +278,7 @@ pub(crate) struct LivelinessTokenState { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -302,7 +302,7 @@ pub struct LivelinessToken<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -355,7 +355,7 @@ impl<'a> LivelinessToken<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -395,7 +395,7 @@ impl Drop for LivelinessToken<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() @@ -422,7 +422,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, 
DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) @@ -462,7 +462,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session /// .declare_subscriber("key/expression") @@ -491,7 +491,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) @@ -584,7 +584,7 @@ where /// # use std::convert::TryFrom; /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let tokens = session /// .liveliness() /// .get("key/expression") @@ -616,7 +616,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session /// .liveliness() /// .get("key/expression") @@ -655,7 +655,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = 
zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .liveliness() @@ -684,7 +684,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let replies = session /// .liveliness() /// .get("key/expression") diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index 694890ad6c..785f47817b 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -27,7 +27,7 @@ pub(crate) mod liveliness; pub(crate) mod loader; #[cfg(all(feature = "unstable", feature = "plugins"))] pub(crate) mod plugins; -pub(crate) mod publication; +pub(crate) mod publisher; pub(crate) mod query; pub(crate) mod queryable; pub(crate) mod sample; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publisher.rs similarity index 95% rename from zenoh/src/api/publication.rs rename to zenoh/src/api/publisher.rs index d72f18739d..2432c0ebee 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publisher.rs @@ -37,7 +37,7 @@ use { }; use super::{ - builders::publication::{ + builders::publisher::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, PublisherPutBuilder, }, @@ -104,7 +104,7 @@ impl std::fmt::Debug for PublisherRef<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// publisher.put("value").await.unwrap(); /// # } @@ -119,7 +119,7 @@ impl std::fmt::Debug for PublisherRef<'_> { /// use futures::StreamExt; /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let session = 
zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let mut subscriber = session.declare_subscriber("key/expression").await.unwrap(); /// let publisher = session.declare_publisher("another/key/expression").await.unwrap(); /// subscriber.stream().map(Ok).forward(publisher).await.unwrap(); @@ -145,7 +145,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression") /// .await /// .unwrap(); @@ -200,7 +200,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// @@ -228,7 +228,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// publisher.put("value").await.unwrap(); /// # } @@ -260,7 +260,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// publisher.delete().await.unwrap(); /// # } @@ -288,7 +288,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = 
zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() @@ -316,7 +316,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -344,7 +344,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// publisher.undeclare().await.unwrap(); /// # } @@ -368,7 +368,7 @@ impl<'a> Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// @@ -391,7 +391,7 @@ pub trait PublisherDeclarations { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// @@ -418,7 +418,7 @@ impl 
PublisherDeclarations for std::sync::Arc> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// @@ -456,7 +456,7 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// publisher.undeclare().await.unwrap(); /// # } @@ -737,7 +737,7 @@ impl TryFrom for Priority { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_status = publisher.matching_status().await.unwrap(); /// # } @@ -758,7 +758,7 @@ impl MatchingStatus { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() @@ -790,7 +790,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = 
session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() @@ -830,7 +830,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// use zenoh::prelude::*; /// /// let mut n = 0; - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() @@ -859,7 +859,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() @@ -986,7 +986,7 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -1017,7 +1017,7 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher.matching_listener().await.unwrap(); /// matching_listener.undeclare().await.unwrap(); diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 
39d4ea9c26..ffe3d67e14 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -31,7 +31,7 @@ use super::{ encoding::Encoding, handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - publication::Priority, + publisher::Priority, sample::{Locality, QoSBuilder, Sample}, selector::Selector, session::Session, @@ -130,9 +130,9 @@ pub(crate) struct QueryState { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; +/// use zenoh::{prelude::*, query::{ConsolidationMode, QueryTarget}}; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let replies = session /// .get("key/expression?value>1") /// .target(QueryTarget::All) @@ -236,7 +236,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session /// .get("key/expression") /// .callback(|reply| {println!("Received {:?}", reply.result());}) @@ -294,7 +294,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .get("key/expression") @@ -322,7 +322,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let replies = session /// .get("key/expression") /// .with(flume::bounded(32)) diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 863184d718..127d8cb281 100644 --- a/zenoh/src/api/queryable.rs +++ 
b/zenoh/src/api/queryable.rs @@ -41,7 +41,7 @@ use super::{ encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - publication::Priority, + publisher::Priority, sample::{Locality, QoSBuilder, Sample, SampleKind}, selector::{Parameters, Selector}, session::{SessionRef, Undeclarable}, @@ -611,11 +611,11 @@ impl fmt::Debug for QueryableState { /// use futures::prelude::*; /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") +/// query.reply("key/expression", "value") /// .await /// .unwrap(); /// } @@ -642,7 +642,7 @@ impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// queryable.undeclare().await.unwrap(); /// # } @@ -690,7 +690,7 @@ impl Drop for CallbackQueryable<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// # } /// ``` @@ -713,7 +713,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session /// 
.declare_queryable("key/expression") /// .callback(|query| {println!(">> Handling query '{}'", query.selector());}) @@ -753,7 +753,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .declare_queryable("key/expression") @@ -781,7 +781,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .with(flume::bounded(32)) @@ -846,7 +846,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .with(flume::bounded(32)) @@ -854,7 +854,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") +/// query.reply("key/expression", "value") /// .await /// .unwrap(); /// } @@ -876,7 +876,7 @@ impl<'a, Handler> Queryable<'a, Handler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session.declare_queryable("key/expression") /// .await /// .unwrap(); diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index f70f024677..117e9c5924 100644 
--- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -24,7 +24,7 @@ use zenoh_protocol::{ use super::{ builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - publication::Priority, value::Value, + publisher::Priority, value::Value, }; pub type SourceSn = u64; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 8963d37e30..a7764a5cab 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -36,9 +36,9 @@ use crate::{ /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; +/// use zenoh::{config::WhatAmI, prelude::*}; /// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) +/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { @@ -61,9 +61,9 @@ impl ScoutBuilder { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{config::WhatAmI, prelude::*}; /// - /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) + /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .callback(|hello| { println!("{}", hello); }) /// .await /// .unwrap(); @@ -95,10 +95,10 @@ impl ScoutBuilder { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{config::WhatAmI, prelude::*}; /// /// let mut n = 0; - /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) + /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .callback_mut(move |_hello| { n += 1; }) /// .await /// .unwrap(); @@ -121,9 +121,9 @@ impl ScoutBuilder { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{config::WhatAmI, prelude::*}; /// - /// let receiver = zenoh::scout(WhatAmI::Peer | 
WhatAmI::Router, config::default()) + /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .with(flume::bounded(32)) /// .await /// .unwrap(); @@ -188,9 +188,9 @@ where /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; +/// use zenoh::{config::WhatAmI, prelude::*}; /// -/// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) +/// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .callback(|hello| { println!("{}", hello); }) /// .await /// .unwrap(); @@ -208,9 +208,9 @@ impl ScoutInner { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{config::WhatAmI, prelude::*}; /// - /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) + /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .callback(|hello| { println!("{}", hello); }) /// .await /// .unwrap(); @@ -243,9 +243,9 @@ impl fmt::Debug for ScoutInner { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; +/// use zenoh::{config::WhatAmI, prelude::*}; /// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) +/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .with(flume::bounded(32)) /// .await /// .unwrap(); @@ -276,9 +276,9 @@ impl Scout { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{config::WhatAmI, prelude::*}; /// - /// let scout = zenoh::scout(WhatAmI::Router, config::default()) + /// let scout = zenoh::scout(WhatAmI::Router, zenoh::config::default()) /// .with(flume::bounded(32)) /// .await /// .unwrap(); @@ -352,10 +352,9 @@ fn _scout( /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::*; -/// use zenoh::scouting::WhatAmI; +/// use zenoh::{config::WhatAmI, 
prelude::*}; /// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) +/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default()) /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5c107b1655..a81835cadc 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -62,7 +62,7 @@ use zenoh_task::TaskController; use super::{ admin, - builders::publication::{ + builders::publisher::{ PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, SessionPutBuilder, }, @@ -71,7 +71,7 @@ use super::{ handlers::{Callback, DefaultHandler}, info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, - publication::{Priority, PublisherState}, + publisher::{Priority, PublisherState}, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, @@ -83,8 +83,8 @@ use super::{ #[cfg(feature = "unstable")] use super::{ liveliness::{Liveliness, LivelinessTokenState}, - publication::Publisher, - publication::{MatchingListenerState, MatchingStatus}, + publisher::Publisher, + publisher::{MatchingListenerState, MatchingStatus}, query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, }; @@ -458,7 +458,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") /// .await /// .unwrap(); @@ -491,7 +491,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = Session::leak(zenoh::open(config::peer()).await.unwrap()); + /// let session = 
zenoh::Session::leak(zenoh::open(zenoh::config::peer()).await.unwrap()); /// let subscriber = session.declare_subscriber("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { @@ -525,7 +525,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session.close().await.unwrap(); /// # } /// ``` @@ -569,7 +569,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let peers = session.config().get("connect/endpoints").unwrap(); /// # } /// ``` @@ -580,7 +580,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let _ = session.config().insert_json5("connect/endpoints", r#"["tcp/127.0.0.1/7447"]"#); /// # } /// ``` @@ -641,7 +641,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); /// # } /// ``` @@ -701,9 +701,9 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{encoding::Encoding, prelude::*}; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session /// .put("key/expression", "payload") /// .encoding(Encoding::TEXT_PLAIN) @@ -748,7 +748,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = 
zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session.delete("key/expression").await.unwrap(); /// # } /// ``` @@ -786,7 +786,7 @@ impl Session { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let replies = session.get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// println!(">> Received {:?}", reply.result()); @@ -1893,7 +1893,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") /// .await /// .unwrap(); @@ -1934,14 +1934,14 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") /// .await /// .unwrap(); /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { /// query.reply( - /// KeyExpr::try_from("key/expression").unwrap(), + /// "key/expression", /// "value", /// ).await.unwrap(); /// } @@ -1965,7 +1965,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Publisher`](crate::publication::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::publisher::Publisher) for the given key expression. 
/// /// # Arguments /// @@ -1977,7 +1977,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") /// .await /// .unwrap(); @@ -2010,7 +2010,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -2503,7 +2503,7 @@ impl fmt::Debug for Session { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") /// .await /// .unwrap(); @@ -2527,7 +2527,7 @@ pub trait SessionDeclarations<'s, 'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") /// .await /// .unwrap(); @@ -2559,14 +2559,14 @@ pub trait SessionDeclarations<'s, 'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") /// .await /// .unwrap(); /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { /// query.reply( - /// 
KeyExpr::try_from("key/expression").unwrap(), + /// "key/expression", /// "value", /// ).await.unwrap(); /// } @@ -2581,7 +2581,7 @@ pub trait SessionDeclarations<'s, 'a> { TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Publisher`](crate::publication::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::publisher::Publisher) for the given key expression. /// /// # Arguments /// @@ -2593,7 +2593,7 @@ pub trait SessionDeclarations<'s, 'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") /// .await /// .unwrap(); @@ -2616,7 +2616,7 @@ pub trait SessionDeclarations<'s, 'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") @@ -2634,7 +2634,7 @@ pub trait SessionDeclarations<'s, 'a> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let info = session.info(); /// # } /// ``` @@ -2689,7 +2689,7 @@ impl crate::net::primitives::EPrimitives for Session { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// # } /// ``` /// @@ -2697,9 +2697,9 @@ impl crate::net::primitives::EPrimitives for Session { /// # #[tokio::main] /// # async fn main() { /// use std::str::FromStr; -/// use zenoh::prelude::*; +/// use zenoh::{config::ZenohId, prelude::*}; /// -/// let mut config = config::peer(); +/// let 
mut config = zenoh::config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); /// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); /// @@ -2726,7 +2726,7 @@ where /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index a0cfd51811..5235ad4917 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -66,7 +66,7 @@ impl fmt::Debug for SubscriberState { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) }) @@ -91,9 +91,9 @@ impl<'a> SubscriberInner<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::*; + /// use zenoh::{prelude::*, sample::Sample}; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// # fn data_handler(_sample: Sample) { }; /// let subscriber = session /// .declare_subscriber("key/expression") @@ -123,7 +123,7 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .await @@ 
-174,7 +174,7 @@ impl Drop for SubscriberInner<'_> { /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() @@ -220,7 +220,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) @@ -262,7 +262,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session /// .declare_subscriber("key/expression") @@ -290,7 +290,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) @@ -422,7 +422,7 @@ where /// # async fn main() { /// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) @@ -449,7 +449,7 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let 
session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") /// .await /// .unwrap(); @@ -494,7 +494,7 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// # async fn main() { /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") /// .await /// .unwrap(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2cc8612ef9..c033c7feee 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -38,7 +38,7 @@ //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).await.unwrap(); +//! let session = zenoh::open(zenoh::config::default()).await.unwrap(); //! session.put("key/expression", "value").await.unwrap(); //! session.close().await.unwrap(); //! } @@ -52,7 +52,7 @@ //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).await.unwrap(); +//! let session = zenoh::open(zenoh::config::default()).await.unwrap(); //! let subscriber = session.declare_subscriber("key/expression").await.unwrap(); //! while let Ok(sample) = subscriber.recv_async().await { //! println!("Received: {:?}", sample); @@ -69,7 +69,7 @@ //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).await.unwrap(); +//! let session = zenoh::open(zenoh::config::default()).await.unwrap(); //! let replies = session.get("key/expression").await.unwrap(); //! while let Ok(reply) = replies.recv_async().await { //! 
println!(">> Received {:?}", reply.result()); @@ -111,23 +111,27 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); -// Expose some functions directly to root `zenoh::`` namespace for convenience -pub use crate::api::{scouting::scout, session::open}; +#[doc(inline)] +pub use crate::{ + config::Config, + core::{Error, Result}, + key_expr::{kedefine, keformat, kewrite}, + scouting::scout, + session::{open, Session}, +}; pub mod prelude; /// Zenoh core types pub mod core { #[allow(deprecated)] - pub use zenoh_core::AsyncResolve; - #[allow(deprecated)] - pub use zenoh_core::SyncResolve; + pub use zenoh_core::{AsyncResolve, SyncResolve}; pub use zenoh_core::{Resolvable, Resolve, Wait}; + pub use zenoh_result::ErrNo; /// A zenoh error. pub use zenoh_result::Error; /// A zenoh result. pub use zenoh_result::ZResult as Result; - pub use zenoh_util::{core::zresult::ErrNo, try_init_log_from_env}; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -200,7 +204,7 @@ pub mod session { #[doc(hidden)] pub use crate::api::session::InitBuilder; pub use crate::api::{ - builders::publication::{SessionDeleteBuilder, SessionPutBuilder}, + builders::publisher::{SessionDeleteBuilder, SessionPutBuilder}, session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, }; } @@ -262,27 +266,27 @@ pub mod subscriber { } /// Publishing primitives -pub mod publication { +pub mod publisher { pub use zenoh_protocol::core::CongestionControl; #[zenoh_macros::unstable] - pub use crate::api::publication::MatchingListener; + pub use crate::api::publisher::MatchingListener; #[zenoh_macros::unstable] - pub use crate::api::publication::MatchingListenerBuilder; + pub use crate::api::publisher::MatchingListenerBuilder; #[zenoh_macros::unstable] - pub use crate::api::publication::MatchingListenerUndeclaration; + pub use crate::api::publisher::MatchingListenerUndeclaration; #[zenoh_macros::unstable] - pub use 
crate::api::publication::MatchingStatus; + pub use crate::api::publisher::MatchingStatus; #[zenoh_macros::unstable] - pub use crate::api::publication::PublisherDeclarations; + pub use crate::api::publisher::PublisherDeclarations; #[zenoh_macros::unstable] - pub use crate::api::publication::PublisherRef; + pub use crate::api::publisher::PublisherRef; pub use crate::api::{ - builders::publication::{ + builders::publisher::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, PublisherDeleteBuilder, PublisherPutBuilder, }, - publication::{Priority, Publisher, PublisherUndeclaration}, + publisher::{Priority, Publisher, PublisherUndeclaration}, }; } @@ -369,13 +373,11 @@ pub mod plugins { #[doc(hidden)] pub mod internal { - pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout}; + pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout, ResolveFuture}; pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_task::{TaskController, TerminatableTask}; - pub use zenoh_util::{ - core::ResolveFuture, zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR, - }; + pub use zenoh_util::{zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; pub use crate::api::encoding::EncodingInternals; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2ed94e6f47..605b0638ab 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -24,43 +24,22 @@ //!use zenoh::prelude::*; //! 
``` -// Reexport API in flat namespace -pub(crate) mod flat { - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - pub use crate::shm::*; +mod _prelude { + #[zenoh_macros::unstable] + pub use crate::api::publisher::PublisherDeclarations; pub use crate::{ - buffers::*, - bytes::*, - config::*, + api::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }, + session::{SessionDeclarations, Undeclarable}, + }, + config::ValidatedMap, core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, - encoding::*, - handlers::*, - key_expr::*, - publication::*, - query::*, - queryable::*, - sample::*, - scouting::*, - selector::*, - session::*, - subscriber::*, - time::*, - value::*, }; } -// Reexport API in hierarchical namespace -pub(crate) mod mods { - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - pub use crate::shm; - pub use crate::{ - buffers, bytes, config, core, encoding, handlers, key_expr, publication, query, queryable, - sample, scouting, selector, session, subscriber, time, value, - }; -} - -pub use flat::*; -pub use mods::*; +pub use _prelude::*; #[allow(deprecated)] pub use crate::core::AsyncResolve; @@ -71,14 +50,14 @@ pub use crate::core::Wait; /// Prelude to import when using Zenoh's sync API. #[deprecated = "use `zenoh::prelude` instead"] pub mod sync { - pub use super::{flat::*, mods::*}; + pub use super::_prelude::*; #[allow(deprecated)] pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. 
#[deprecated = "use `zenoh::prelude` instead"] pub mod r#async { - pub use super::{flat::*, mods::*}; + pub use super::_prelude::*; #[allow(deprecated)] pub use crate::core::AsyncResolve; } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 1889a9f9fa..b78a9ac888 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -19,8 +19,13 @@ mod test { }; use tokio::runtime::Handle; - use zenoh::prelude::*; - use zenoh_core::{zlock, ztimeout}; + use zenoh::{ + config, + config::{EndPoint, WhatAmI}, + internal::{zlock, ztimeout}, + prelude::*, + Config, Session, + }; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index a63137ccfc..b52fd067ba 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -11,10 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "unstable")] +#![cfg(feature = "unstable")] +use zenoh::{bytes::ZBytes, config::Config, prelude::*}; + #[test] fn attachment_pubsub() { - use zenoh::{bytes::ZBytes, prelude::*}; let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") @@ -53,7 +54,6 @@ fn attachment_pubsub() { } } -#[cfg(feature = "unstable")] #[test] fn attachment_queries() { use zenoh::prelude::*; diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 039c1b1986..e97475c237 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -11,12 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // +#![cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh::{ + bytes::ZBytes, + prelude::*, + shm::{ + zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, + ZShmMut, POSIX_PROTOCOL_ID, + }, +}; #[test] -#[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_bytes_single_buf() { - use zenoh::prelude::*; - // create an SHM backend... 
let backend = PosixSharedMemoryProviderBackend::builder() .with_size(4096) diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 67a1c9c093..9bee87f199 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -11,7 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::*; +use zenoh::{ + config::{ConnectionRetryConf, EndPoint}, + prelude::*, + Config, +}; #[test] fn retry_config_overriding() { @@ -132,7 +136,7 @@ fn retry_config_const_period() { } #[test] -fn retry_config_infinit_period() { +fn retry_config_infinite_period() { let mut config = Config::default(); config .insert_json5( diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 41a681dc8f..17819390aa 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -13,12 +13,12 @@ // use std::time::Duration; -use zenoh::{internal::ztimeout, prelude::*}; +use zenoh::{config, internal::ztimeout, prelude::*, query::Reply, sample::SampleKind, Session}; const TIMEOUT: Duration = Duration::from_secs(10); async fn open_session(listen: &[&str], connect: &[&str]) -> Session { - let mut config = peer(); + let mut config = config::peer(); config.listen.endpoints = listen .iter() .map(|e| e.parse().unwrap()) diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index 4a5a1e3808..e1c366ac52 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -11,13 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +use zenoh::{kedefine, keformat}; + #[test] fn kedefine_reuse() { - zenoh::key_expr::kedefine!( + kedefine!( pub gkeys: "zenoh/${group:*}/${member:*}", ); let mut formatter = gkeys::formatter(); - let k1 = zenoh::key_expr::keformat!(formatter, group = "foo", member = "bar").unwrap(); + let k1 = keformat!(formatter, group = "foo", member = "bar").unwrap(); assert_eq!(dbg!(k1).as_str(), "zenoh/foo/bar"); formatter.set("member", "*").unwrap(); @@ -29,8 +31,8 @@ fn kedefine_reuse() { let k2 = 
dbg!(&mut formatter).build().unwrap(); assert_eq!(dbg!(k2).as_str(), "zenoh/foo/*"); - let k3 = zenoh::key_expr::keformat!(formatter, group = "foo", member = "*").unwrap(); + let k3 = keformat!(formatter, group = "foo", member = "*").unwrap(); assert_eq!(dbg!(k3).as_str(), "zenoh/foo/*"); - zenoh::key_expr::keformat!(formatter, group = "**", member = "**").unwrap_err(); + keformat!(formatter, group = "**", member = "**").unwrap_err(); } diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 55f9368a87..5ecdc363d5 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -13,7 +13,7 @@ // use std::{thread, time::Duration}; -use zenoh::prelude::*; +use zenoh::{handlers::RingChannel, prelude::*, Config}; #[test] fn pubsub_with_ringbuffer() { diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index f6e6ee561d..32001f1875 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -19,8 +19,12 @@ use std::{ }, }; -use zenoh::prelude::*; -use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}; +use zenoh::{ + config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}, + key_expr::KeyExpr, + prelude::*, + Config, +}; // Tokio's time granularity on different platforms #[cfg(target_os = "windows")] diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index b974b5d705..6c666ca26d 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -11,12 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "unstable")] +#![cfg(feature = "unstable")] +use std::time::Duration; + +use zenoh::{ + config, + internal::ztimeout, + prelude::*, + sample::{Sample, SampleKind}, +}; + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use std::time::Duration; - - use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff 
--git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 1473d7f6fc..339bc196b1 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -11,21 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "unstable")] -use { - flume::RecvTimeoutError, std::str::FromStr, std::time::Duration, zenoh::internal::ztimeout, - zenoh::prelude::*, -}; +#![cfg(feature = "unstable")] +use std::{str::FromStr, time::Duration}; + +use flume::RecvTimeoutError; +use zenoh::{config, config::Locator, internal::ztimeout, prelude::*, sample::Locality, Session}; -#[cfg(feature = "unstable")] const TIMEOUT: Duration = Duration::from_secs(60); -#[cfg(feature = "unstable")] const RECV_TIMEOUT: Duration = Duration::from_secs(1); -#[cfg(feature = "unstable")] async fn create_session_pair(locator: &str) -> (Session, Session) { let config1 = { - let mut config = zenoh::config::peer(); + let mut config = config::peer(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .listen @@ -33,14 +30,13 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { .unwrap(); config }; - let config2 = zenoh::config::client([Locator::from_str(locator).unwrap()]); + let config2 = config::client([Locator::from_str(locator).unwrap()]); let session1 = ztimeout!(zenoh::open(config1)).unwrap(); let session2 = ztimeout!(zenoh::open(config2)).unwrap(); (session1, session2) } -#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_any() -> ZResult<()> { let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; @@ -92,12 +88,11 @@ async fn zenoh_matching_status_any() -> ZResult<()> { Ok(()) } -#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(peer())).unwrap(); + let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); - let session2 = 
ztimeout!(zenoh::open(peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_remote_test") @@ -147,12 +142,11 @@ async fn zenoh_matching_status_remote() -> ZResult<()> { Ok(()) } -#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); + let session1 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); - let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_local_test") diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 3a75cc9f37..ab2dd1d000 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,7 +13,11 @@ // use std::time::Duration; -use zenoh::{internal::ztimeout, prelude::*}; +use zenoh::{ + internal::ztimeout, + prelude::*, + publisher::{CongestionControl, Priority}, +}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index fac785d7c0..123ff24201 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -22,9 +22,11 @@ use std::{ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::{ - core::Result, + config::{ModeDependentValue, WhatAmI, WhatAmIMatcher}, internal::{bail, ztimeout}, prelude::*, + publisher::CongestionControl, + Config, Result, Session, }; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 1dfca3072e..5201be24a2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -21,7 +21,10 @@ use std::{ #[cfg(feature = "unstable")] use zenoh::runtime::{Runtime, RuntimeBuilder}; -use 
zenoh::{internal::ztimeout, prelude::*}; +use zenoh::{ + config, internal::ztimeout, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, + sample::SampleKind, subscriber::Reliability, Session, +}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -295,7 +298,7 @@ async fn zenoh_2sessions_1runtime_init() { println!("[RI][02c] Creating peer01a session from runtime 1"); let peer01a = zenoh::session::init(r1.clone()).await.unwrap(); println!("[RI][03c] Closing peer01a session"); - std::mem::drop(peer01a); + drop(peer01a); test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; close_session(peer01, peer02).await; println!("[ ][01e] Closing r1 runtime"); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 3a0447fc92..a969af4dbe 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -11,188 +11,194 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(all(feature = "unstable", feature = "shared-memory"))] -mod tests { - use std::{ - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, - time::Duration, - }; - - use zenoh::{internal::ztimeout, prelude::*}; - - const TIMEOUT: Duration = Duration::from_secs(60); - const SLEEP: Duration = Duration::from_secs(1); - - const MSG_COUNT: usize = 1_00; - const MSG_SIZE: [usize; 2] = [1_024, 100_000]; - - async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { - // Open the sessions - let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); - println!("[ ][01a] Opening peer01 session: {:?}", endpoints); - let peer01 = ztimeout!(zenoh::open(config)).unwrap(); - - let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); - 
config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); - println!("[ ][02a] Opening peer02 session: {:?}", endpoints); - let peer02 = ztimeout!(zenoh::open(config)).unwrap(); - - (peer01, peer02) - } +#![cfg(all(feature = "unstable", feature = "shared-memory"))] +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +use zenoh::{ + config, + internal::ztimeout, + prelude::*, + publisher::CongestionControl, + shm::{ + BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, + POSIX_PROTOCOL_ID, + }, + subscriber::Reliability, + Session, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const SLEEP: Duration = Duration::from_secs(1); + +const MSG_COUNT: usize = 1_00; +const MSG_SIZE: [usize; 2] = [1_024, 100_000]; + +async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][01a] Opening peer01 session: {:?}", endpoints); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); + + let mut config = config::peer(); + config.connect.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][02a] Opening peer02 session: {:?}", endpoints); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); + + (peer01, peer02) +} - async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { - // Open the sessions - let mut config = config::peer(); - config.listen.endpoints = vec![endpoint01.parse().unwrap()]; - 
config.scouting.multicast.set_enabled(Some(true)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); - println!("[ ][01a] Opening peer01 session: {}", endpoint01); - let peer01 = ztimeout!(zenoh::open(config)).unwrap(); - - let mut config = config::peer(); - config.listen.endpoints = vec![endpoint02.parse().unwrap()]; - config.scouting.multicast.set_enabled(Some(true)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); - println!("[ ][02a] Opening peer02 session: {}", endpoint02); - let peer02 = ztimeout!(zenoh::open(config)).unwrap(); - - (peer01, peer02) - } +async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(true)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][01a] Opening peer01 session: {}", endpoint01); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); + + let mut config = config::peer(); + config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(true)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][02a] Opening peer02 session: {}", endpoint02); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); + + (peer01, peer02) +} - async fn close_session(peer01: Session, peer02: Session) { - println!("[ ][01d] Closing peer02 session"); - ztimeout!(peer01.close()).unwrap(); - println!("[ ][02d] Closing peer02 session"); - ztimeout!(peer02.close()).unwrap(); - } +async fn close_session(peer01: Session, peer02: Session) { + println!("[ ][01d] Closing peer02 session"); + ztimeout!(peer01.close()).unwrap(); + println!("[ ][02d] Closing peer02 session"); + ztimeout!(peer02.close()).unwrap(); +} - async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { - let 
msg_count = match reliability { - Reliability::Reliable => MSG_COUNT, - Reliability::BestEffort => 1, - }; - let msgs = Arc::new(AtomicUsize::new(0)); - - for size in MSG_SIZE { - let key_expr = format!("shm{size}"); - - msgs.store(0, Ordering::SeqCst); - - // Subscribe to data - println!("[PS][01b] Subscribing on peer01 session"); - let c_msgs = msgs.clone(); - let _sub = ztimeout!(peer01 - .declare_subscriber(&key_expr) - .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs.fetch_add(1, Ordering::Relaxed); - })) +async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { + let msg_count = match reliability { + Reliability::Reliable => MSG_COUNT, + Reliability::BestEffort => 1, + }; + let msgs = Arc::new(AtomicUsize::new(0)); + + for size in MSG_SIZE { + let key_expr = format!("shm{size}"); + + msgs.store(0, Ordering::SeqCst); + + // Subscribe to data + println!("[PS][01b] Subscribing on peer01 session"); + let c_msgs = msgs.clone(); + let _sub = ztimeout!(peer01 + .declare_subscriber(&key_expr) + .callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs.fetch_add(1, Ordering::Relaxed); + })) + .unwrap(); + + // Wait for the declaration to propagate + tokio::time::sleep(SLEEP).await; + + // create SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size * MSG_COUNT / 10) + .unwrap() + .res() .unwrap(); + // ...and SHM provider + let shm01 = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // remember segment size that was allocated + let shm_segment_size = shm01.available(); + + // Prepare a layout for allocations + let layout = shm01.alloc(size).into_layout().unwrap(); + + // Put data + println!("[PS][03b] Putting on peer02 session. 
{MSG_COUNT} msgs of {size} bytes."); + for c in 0..msg_count { + // Allocate new message + let sbuf = ztimeout!(layout.alloc().with_policy::>()).unwrap(); + println!("{c} created"); + + // Publish this message + ztimeout!(peer02 + .put(&key_expr, sbuf) + .congestion_control(CongestionControl::Block)) + .unwrap(); + println!("{c} putted"); + } - // Wait for the declaration to propagate - tokio::time::sleep(SLEEP).await; - - // create SHM backend... - let backend = PosixSharedMemoryProviderBackend::builder() - .with_size(size * MSG_COUNT / 10) - .unwrap() - .res() - .unwrap(); - // ...and SHM provider - let shm01 = SharedMemoryProviderBuilder::builder() - .protocol_id::() - .backend(backend) - .res(); - - // remember segment size that was allocated - let shm_segment_size = shm01.available(); - - // Prepare a layout for allocations - let layout = shm01.alloc(size).into_layout().unwrap(); - - // Put data - println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); - for c in 0..msg_count { - // Allocate new message - let sbuf = - ztimeout!(layout.alloc().with_policy::>()).unwrap(); - println!("{c} created"); - - // Publish this message - ztimeout!(peer02 - .put(&key_expr, sbuf) - .congestion_control(CongestionControl::Block)) - .unwrap(); - println!("{c} putted"); + // wat for all messages received + ztimeout!(async { + loop { + let cnt = msgs.load(Ordering::Relaxed); + println!("[PS][03b] Received {cnt}/{msg_count}."); + if cnt != msg_count { + tokio::time::sleep(SLEEP).await; + } else { + break; + } } + }); - // wat for all messages received - ztimeout!(async { - loop { - let cnt = msgs.load(Ordering::Relaxed); - println!("[PS][03b] Received {cnt}/{msg_count}."); - if cnt != msg_count { - tokio::time::sleep(SLEEP).await; - } else { - break; - } - } - }); - - // wat for all memory reclaimed - ztimeout!(async { - loop { - shm01.garbage_collect(); - let available = shm01.available(); - println!("[PS][03b] SHM available 
{available}/{shm_segment_size}"); - if available != shm_segment_size { - tokio::time::sleep(SLEEP).await; - } else { - break; - } + // wat for all memory reclaimed + ztimeout!(async { + loop { + shm01.garbage_collect(); + let available = shm01.available(); + println!("[PS][03b] SHM available {available}/{shm_segment_size}"); + if available != shm_segment_size { + tokio::time::sleep(SLEEP).await; + } else { + break; } - }); - } + } + }); } +} - #[cfg(feature = "shared-memory")] - #[test] - fn zenoh_shm_unicast() { - tokio::runtime::Runtime::new().unwrap().block_on(async { - // Initiate logging - zenoh_util::try_init_log_from_env(); +#[test] +fn zenoh_shm_unicast() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Initiate logging + zenoh_util::try_init_log_from_env(); - let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await; - test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; - close_session(peer01, peer02).await; - }); - } + let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await; + test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; + close_session(peer01, peer02).await; + }); +} - #[cfg(feature = "shared-memory")] - #[test] - fn zenoh_shm_multicast() { - tokio::runtime::Runtime::new().unwrap().block_on(async { - // Initiate logging - zenoh_util::try_init_log_from_env(); - - let (peer01, peer02) = - open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await; - test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; - close_session(peer01, peer02).await; - }); - } +#[test] +fn zenoh_shm_multicast() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Initiate logging + zenoh_util::try_init_log_from_env(); + + let (peer01, peer02) = + open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await; + test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; + close_session(peer01, peer02).await; + }); } diff 
--git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index b62a842b28..70a70c5dce 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -20,7 +20,15 @@ use std::{ }; use tokio::runtime::Handle; -use zenoh::{internal::ztimeout, prelude::*}; +use zenoh::{ + config, + config::{EndPoint, WhatAmI}, + internal::ztimeout, + key_expr::KeyExpr, + prelude::*, + publisher::CongestionControl, + Session, +}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 6772f2fe3eec1fdf289e7e96563b9b005e15489c Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 31 May 2024 09:27:07 +0200 Subject: [PATCH 346/598] Enhance subscribers, queryables and liveliness tokens propagation to improve scalability for peers subsystems (#1044) * Implement interest protocol between peers and routers * Add logger init in matching test * Peers send subscribers interests to newly connected routers * Remove commented code * Remove empty functions * Fix code fmt --------- Co-authored-by: Luca Cominardi --- .../src/net/routing/hat/linkstate_peer/mod.rs | 13 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 4 - .../net/routing/hat/linkstate_peer/queries.rs | 4 - zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 266 ++++++++++-------- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 10 + zenoh/src/net/routing/hat/router/mod.rs | 17 +- zenoh/src/net/routing/hat/router/pubsub.rs | 154 ++++------ zenoh/src/net/routing/hat/router/queries.rs | 81 +++--- zenoh/tests/matching.rs | 7 +- 9 files changed, 263 insertions(+), 293 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 2b0c8e4ca7..82c2a96166 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -41,8 +41,8 @@ use zenoh_transport::unicast::TransportUnicast; use self::{ network::Network, - pubsub::{pubsub_new_face, pubsub_remove_node, 
undeclare_client_subscription}, - queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, + pubsub::{pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_remove_node, undeclare_client_queryable}, }; use super::{ super::dispatcher::{ @@ -212,12 +212,11 @@ impl HatBaseTrait for HatCode { fn new_local_face( &self, - tables: &mut Tables, + _tables: &mut Tables, _tables_ref: &Arc, - face: &mut Face, + _face: &mut Face, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + // Nothing to do Ok(()) } @@ -239,8 +238,6 @@ impl HatBaseTrait for HatCode { }; face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); if face.state.whatami != WhatAmI::Client { hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 67b04661c6..0bd9f62f98 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -554,10 +554,6 @@ fn forget_client_subscription( } } -pub(super) fn pubsub_new_face(_tables: &mut Tables, _face: &mut Arc) { - // Nothing to do -} - pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { for mut res in hat!(tables) .peer_subs diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 9c3d502e5f..b75233409d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -584,10 +584,6 @@ fn forget_client_queryable( } } -pub(super) fn queries_new_face(_tables: &mut Tables, _face: &mut Arc) { - // Nothing to do -} - pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { let mut qabls = vec![]; for res in hat!(tables).peer_qabls.iter() { diff --git 
a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index e46ff3ff16..ef092d286a 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -24,7 +24,8 @@ use zenoh_protocol::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, - interest::{InterestId, InterestMode}, + interest::{InterestId, InterestMode, InterestOptions}, + Interest, }, }; use zenoh_sync::get_mut_unchecked; @@ -34,11 +35,11 @@ use crate::{ key_expr::KeyExpr, net::routing::{ dispatcher::{ - face::FaceState, + face::{FaceState, InterestState}, resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + hat::{HatPubSubTrait, Sources}, router::{update_data_routes_from, RoutesIndexes}, RoutingContext, PREFIX_LIVELINESS, }, @@ -341,7 +342,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - for src_face in tables + for mut src_face in tables .faces .values() .cloned() @@ -356,6 +357,33 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { &mut src_face.clone(), ); } + if face.whatami == WhatAmI::Router { + for (res, _) in face_hat_mut!(&mut src_face).remote_sub_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; + get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: 
InterestMode::CurrentFuture, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } } } // recompute routes @@ -374,133 +402,91 @@ impl HatPubSubTrait for HatCode { mode: InterestMode, aggregate: bool, ) { - if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = (!mode.future()).then_some(id); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - if let Some(res) = res.as_ref() { - if aggregate { - if tables.faces.values().any(|src_face| { - src_face.id != face.id - && face_hat!(src_face) - .remote_subs - .values() - .any(|sub| sub.context.is_some() && sub.matches(res)) - }) { - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert((*res).clone(), id); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); - } - } else { - for src_face in tables - .faces + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.as_ref().map(|res| (*res).clone()), aggregate)); + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami == WhatAmI::Router) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + let 
wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + + fn undeclare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + if let Some(interest) = face_hat_mut!(face).remote_sub_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_sub_interests .values() - .cloned() - .collect::>>() - { - if src_face.id != face.id { - for sub in face_hat!(src_face).remote_subs.values() { - if sub.context.is_some() && sub.matches(res) { - let id = if mode.future() { - let id = - face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber( - DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }, - ), - }, - sub.expr(), - )); - } - } - } - } - } - } else { - for src_face in tables + .any(|i| *i == interest) + }) { + for dst_face in tables .faces - .values() - .cloned() - .collect::>>() + .values_mut() + .filter(|f| f.whatami == WhatAmI::Router) { - if src_face.id != face.id { - for sub in face_hat!(src_face).remote_subs.values() { - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - 
interest_id, + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.options.subscribers() + && (local_interest.res == interest.0) + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), }, - sub.expr(), + local_interest + .res + .as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), )); + get_mut_unchecked(dst_face).local_interests.remove(&id); } } } } } - if mode.future() { - face_hat_mut!(face) - .remote_sub_interests - .insert(id, (res.cloned(), aggregate)); - } - } - - fn undeclare_sub_interest( - &self, - _tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - face_hat_mut!(face).remote_sub_interests.remove(&id); } fn declare_subscription( @@ -570,6 +556,50 @@ impl HatPubSubTrait for HatCode { } }; + for face in tables + .faces + .values() + .filter(|f| f.whatami == WhatAmI::Router) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) + }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { + let 
key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } + for face in tables.faces.values().filter(|f| { f.whatami == WhatAmI::Peer && !f diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index caa5f79694..b909190184 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -554,6 +554,16 @@ impl HatQueriesTrait for HatCode { } }; + // TODO: BNestMatching: What if there is a local compete ? + if let Some(face) = tables.faces.values().find(|f| f.whatami == WhatAmI::Router) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: f64::MAX, + }); + } + for face in tables.faces.values().filter(|f| { f.whatami == WhatAmI::Peer && !f diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 910e527bfe..dd7c6e11c7 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -42,12 +42,8 @@ use zenoh_transport::unicast::TransportUnicast; use self::{ network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, - }, + pubsub::{pubsub_linkstate_change, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_linkstate_change, queries_remove_node, undeclare_client_queryable}, }; use super::{ super::dispatcher::{ @@ -364,12 +360,11 @@ impl HatBaseTrait for HatCode { fn new_local_face( &self, - tables: &mut Tables, + _tables: &mut Tables, _tables_ref: &Arc, - face: &mut Face, + _face: &mut Face, ) -> ZResult<()> { - 
pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); + // Nothing to do Ok(()) } @@ -404,8 +399,6 @@ impl HatBaseTrait for HatCode { } face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); match face.state.whatami { WhatAmI::Router => { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 2af567d989..233e0b8cdf 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -111,57 +111,37 @@ fn propagate_simple_subscription_to( || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - if dst_face.whatami != WhatAmI::Client { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); - } else { - let matching_interests = face_hat!(dst_face) - .remote_sub_interests - .values() - .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) - .cloned() - .collect::>, bool)>>(); + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); - for (int_res, aggregate) in matching_interests { - let res = if aggregate { - int_res.as_ref().unwrap_or(res) - } else { - res - }; - if !face_hat!(dst_face).local_subs.contains_key(res) { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - 
let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); - } + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); } } } @@ -704,44 +684,6 @@ fn forget_client_subscription( } } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - - if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - })) - { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - let key_expr = Resource::decl_key(sub, face); - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } -} - pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { match net_type { WhatAmI::Router => { @@ -919,9 +861,16 @@ impl HatPubSubTrait for HatCode { id: InterestId, res: Option<&mut Arc>, mode: InterestMode, - aggregate: bool, + mut aggregate: bool, ) { - if mode.current() && face.whatami == WhatAmI::Client { + if aggregate && face.whatami == WhatAmI::Peer { + tracing::warn!( + "Received Interest with aggregate=true from peer {}. Not supported!", + face.zid + ); + aggregate = true; + } + if mode.current() { let interest_id = (!mode.future()).then_some(id); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers @@ -962,9 +911,17 @@ impl HatPubSubTrait for HatCode { for sub in &hat!(tables).router_subs { if sub.context.is_some() && sub.matches(res) - && (remote_client_subs(sub, face) - || remote_peer_subs(tables, sub) - || remote_router_subs(tables, sub)) + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.face.id != face.id + && s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) { let id = if mode.future() { let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); @@ -994,9 +951,14 @@ impl HatPubSubTrait for HatCode { } else { for sub in &hat!(tables).router_subs { if sub.context.is_some() - && (remote_client_subs(sub, face) 
- || remote_peer_subs(tables, sub) - || remote_router_subs(tables, sub)) + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(s.face.zid, face.zid)) + })) { let id = if mode.future() { let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 9a2beeb001..3ab0ac507d 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -241,11 +241,10 @@ fn propagate_simple_queryable( let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current.is_none() || current.unwrap().1 != info) - && (dst_face.whatami != WhatAmI::Client - || face_hat!(dst_face) - .remote_qabl_interests - .values() - .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true))) + && face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true)) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -811,43 +810,6 @@ fn forget_client_queryable( } } -pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - })) - { - let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, 
Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } -} - pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { match net_type { WhatAmI::Router => { @@ -1075,8 +1037,15 @@ impl HatQueriesTrait for HatCode { id: InterestId, res: Option<&mut Arc>, mode: InterestMode, - aggregate: bool, + mut aggregate: bool, ) { + if aggregate && face.whatami == WhatAmI::Peer { + tracing::warn!( + "Received Interest with aggregate=true from peer {}. Not supported!", + face.zid + ); + aggregate = true; + } if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); if let Some(res) = res.as_ref() { @@ -1084,9 +1053,17 @@ impl HatQueriesTrait for HatCode { if hat!(tables).router_qabls.iter().any(|qabl| { qabl.context.is_some() && qabl.matches(res) - && (remote_client_qabls(qabl, face) - || remote_peer_qabls(tables, qabl) - || remote_router_qabls(tables, qabl)) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.face.id != face.id + && s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) }) { let info = local_qabl_info(tables, res, face); let id = if mode.future() { @@ -1118,9 +1095,15 @@ impl HatQueriesTrait for HatCode { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() && qabl.matches(res) - && 
(remote_client_qabls(qabl, face) - || remote_peer_qabls(tables, qabl) - || remote_router_qabls(tables, qabl)) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables) + .failover_brokering(s.face.zid, face.zid)) + })) { let info = local_qabl_info(tables, qabl, face); let id = if mode.future() { diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 339bc196b1..db10241cc4 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -39,6 +39,7 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_any() -> ZResult<()> { + zenoh_util::try_init_log_from_env(); let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; let publisher1 = ztimeout!(session1 @@ -90,8 +91,9 @@ async fn zenoh_matching_status_any() -> ZResult<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); + zenoh_util::try_init_log_from_env(); + let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); let publisher1 = ztimeout!(session1 @@ -144,8 +146,9 @@ async fn zenoh_matching_status_remote() -> ZResult<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); + zenoh_util::try_init_log_from_env(); + let session1 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); let session2 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); let publisher1 = ztimeout!(session1 From 
a448215cc60e77b50cb80ae9d1305bb250af9c7f Mon Sep 17 00:00:00 2001 From: snehilzs <148767022+snehilzs@users.noreply.github.com> Date: Fri, 31 May 2024 19:13:07 +0530 Subject: [PATCH 347/598] Adding TLS authentication (#840) * adding test files * testing cert names * testing cert authn * adding basic authID functionality * remove secret files * add extensibility * add extensibility * add extensibility * adding type constraints * adding level abstraction for authentication info * adding username authentication * cleaning code * added cfg checks for auth_usrpwd * adding test files * fix error due to vsock * fix test error * access auth ids in acl interceptor * add authentication support in acl * added Subject * adding test files * add authn features with acl * remove error * add tests for tls and quic * add tests for user-password * remove format error * ignore tests without testfiles * remove shm test errors * remove typos * add testfiles for authn * fix testfiles for authn * Chore: Code format * Change port numbers to allow tests to run concurrently * Fix TLS and Quic test failures due to subsequent sessions on same port number * Format json configs * Remove unused deprecated dependency async-rustls * Chore: format list of cargo dependencies * Fix imports --------- Co-authored-by: Oussama Teffahi --- .gitignore | 2 + Cargo.lock | 113 ++ commons/zenoh-config/src/lib.rs | 4 + io/zenoh-link-commons/src/lib.rs | 3 + io/zenoh-link-commons/src/unicast.rs | 68 + io/zenoh-links/zenoh-link-quic/Cargo.toml | 7 +- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 50 +- .../zenoh-link-serial/src/unicast.rs | 8 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 5 +- io/zenoh-links/zenoh-link-tls/Cargo.toml | 2 + io/zenoh-links/zenoh-link-tls/src/unicast.rs | 93 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 8 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 8 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 6 +- .../zenoh-link-vsock/src/unicast.rs | 6 +- 
io/zenoh-links/zenoh-link-ws/src/unicast.rs | 6 +- .../src/unicast/authentication.rs | 43 + .../src/unicast/establishment/accept.rs | 26 +- .../src/unicast/establishment/ext/auth/mod.rs | 15 +- .../unicast/establishment/ext/auth/usrpwd.rs | 8 +- .../src/unicast/establishment/open.rs | 4 + .../src/unicast/lowlatency/transport.rs | 5 + io/zenoh-transport/src/unicast/mod.rs | 16 +- .../src/unicast/transport_unicast_inner.rs | 1 + .../src/unicast/universal/transport.rs | 13 + .../net/routing/interceptor/access_control.rs | 64 +- .../net/routing/interceptor/authorization.rs | 85 +- zenoh/tests/acl.rs | 6 +- zenoh/tests/authentication.rs | 1245 +++++++++++++++++ 29 files changed, 1858 insertions(+), 62 deletions(-) create mode 100644 io/zenoh-transport/src/unicast/authentication.rs create mode 100644 zenoh/tests/authentication.rs diff --git a/.gitignore b/.gitignore index 105dae1aa7..bf5a1656d3 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,6 @@ cargo-timing*.html +#ignore test data +testfiles ci/valgrind-check/*.log diff --git a/Cargo.lock b/Cargo.lock index aff6c4950a..91ad98ce8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -235,6 +235,45 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "asn1-rs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.28", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" 
+version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -1004,6 +1043,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.3.8" @@ -1082,6 +1135,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "dyn-clone" version = "1.0.13" @@ -2305,6 +2369,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.19.0" @@ -3149,6 +3222,15 @@ dependencies = [ "semver 1.0.18", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.37.25" @@ -3951,6 +4033,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -4073,6 +4166,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" dependencies = [ "deranged", + "itoa", "serde", "time-core", "time-macros 0.2.14", @@ -5048,6 +5142,23 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time 0.3.28", +] + [[package]] name = "yasna" version = "0.5.2" @@ -5350,6 +5461,7 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", + "x509-parser", "zenoh-collections", "zenoh-config", "zenoh-core", @@ -5416,6 +5528,7 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", + "x509-parser", "zenoh-collections", "zenoh-config", "zenoh-core", diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index e57660800f..97c72ce579 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -104,6 +104,8 @@ pub struct DownsamplingItemConf { #[derive(Serialize, Debug, Deserialize, Clone)] pub struct AclConfigRules { pub interfaces: Option>, + pub cert_common_names: Option>, + pub usernames: Option>, pub key_exprs: Vec, pub actions: Vec, pub flows: Option>, @@ -124,6 +126,8 @@ pub struct PolicyRule { #[serde(rename_all = "snake_case")] pub enum Subject { Interface(String), 
+ CertCommonName(String), + Username(String), } #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 5a41050e94..6b2ec14c69 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -50,6 +50,7 @@ pub struct Link { pub is_reliable: bool, pub is_streamed: bool, pub interfaces: Vec, + pub auth_identifier: LinkAuthId, } #[async_trait] @@ -78,6 +79,7 @@ impl From<&LinkUnicast> for Link { is_reliable: link.is_reliable(), is_streamed: link.is_streamed(), interfaces: link.get_interface_names(), + auth_identifier: link.get_auth_identifier(), } } } @@ -98,6 +100,7 @@ impl From<&LinkMulticast> for Link { is_reliable: link.is_reliable(), is_streamed: false, interfaces: vec![], + auth_identifier: LinkAuthId::default(), } } } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index add4c3a27b..cd8c550503 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -20,6 +20,7 @@ use core::{ use std::net::SocketAddr; use async_trait::async_trait; +use serde::Serialize; use zenoh_protocol::{ core::{EndPoint, Locator}, transport::BatchSize, @@ -51,6 +52,7 @@ pub trait LinkUnicastTrait: Send + Sync { fn is_reliable(&self) -> bool; fn is_streamed(&self) -> bool; fn get_interface_names(&self) -> Vec; + fn get_auth_identifier(&self) -> LinkAuthId; async fn write(&self, buffer: &[u8]) -> ZResult; async fn write_all(&self, buffer: &[u8]) -> ZResult<()>; async fn read(&self, buffer: &mut [u8]) -> ZResult; @@ -118,3 +120,69 @@ pub fn get_ip_interface_names(addr: &SocketAddr) -> Vec { } } } +#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] + +pub enum LinkAuthType { + Tls, + Quic, + None, +} +#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] + +pub struct LinkAuthId { + auth_type: LinkAuthType, + auth_value: Option, +} + +impl LinkAuthId { + pub fn get_type(&self) -> 
&LinkAuthType { + &self.auth_type + } + pub fn get_value(&self) -> &Option { + &self.auth_value + } +} +impl Default for LinkAuthId { + fn default() -> Self { + LinkAuthId { + auth_type: LinkAuthType::None, + auth_value: None, + } + } +} + +#[derive(Debug)] +pub struct LinkAuthIdBuilder { + pub auth_type: LinkAuthType, //HAS to be provided when building + pub auth_value: Option, //actual value added to the above type; is None for None type +} +impl Default for LinkAuthIdBuilder { + fn default() -> Self { + Self::new() + } +} + +impl LinkAuthIdBuilder { + pub fn new() -> LinkAuthIdBuilder { + LinkAuthIdBuilder { + auth_type: LinkAuthType::None, + auth_value: None, + } + } + + pub fn auth_type(&mut self, auth_type: LinkAuthType) -> &mut Self { + self.auth_type = auth_type; + self + } + pub fn auth_value(&mut self, auth_value: Option) -> &mut Self { + self.auth_value = auth_value; + self + } + + pub fn build(&self) -> LinkAuthId { + LinkAuthId { + auth_type: self.auth_type.clone(), + auth_value: self.auth_value.clone(), + } + } +} diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 63bfc1f839..e10eed71a1 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -30,13 +30,14 @@ base64 = { workspace = true } futures = { workspace = true } quinn = { workspace = true } rustls-native-certs = { workspace = true } -rustls-pki-types = { workspace = true } +rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } + secrecy = { workspace = true } tokio = { workspace = true, features = [ - "fs", "io-util", "net", + "fs", "sync", "time", ] } @@ -56,3 +57,5 @@ zenoh-util = { workspace = true } rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } tokio-rustls = "0.24.1" rustls-pemfile = { version = "1" } + +x509-parser = "0.16.0" diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs 
index a3b2687b6f..cd9cad071f 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -22,10 +22,11 @@ use std::{ use async_trait::async_trait; use tokio::sync::Mutex as AsyncMutex; use tokio_util::sync::CancellationToken; +use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkAuthId, LinkAuthIdBuilder, LinkAuthType, LinkManagerUnicastTrait, + LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -46,6 +47,7 @@ pub struct LinkUnicastQuic { dst_locator: Locator, send: AsyncMutex, recv: AsyncMutex, + auth_identifier: LinkAuthId, } impl LinkUnicastQuic { @@ -55,6 +57,7 @@ impl LinkUnicastQuic { dst_locator: Locator, send: quinn::SendStream, recv: quinn::RecvStream, + auth_identifier: LinkAuthId, ) -> LinkUnicastQuic { // Build the Quic object LinkUnicastQuic { @@ -64,6 +67,7 @@ impl LinkUnicastQuic { dst_locator, send: AsyncMutex::new(send), recv: AsyncMutex::new(recv), + auth_identifier, } } } @@ -156,6 +160,10 @@ impl LinkUnicastTrait for LinkUnicastQuic { fn is_streamed(&self) -> bool { true } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + self.auth_identifier.clone() + } } impl Drop for LinkUnicastQuic { @@ -254,6 +262,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { .open_bi() .await .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; + let auth_id = get_cert_common_name(quic_conn.clone())?; let link = Arc::new(LinkUnicastQuic::new( quic_conn, @@ -261,6 +270,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { endpoint.into(), send, recv, + auth_id.into(), )); Ok(LinkUnicast(link)) @@ -388,12 +398,15 @@ async fn accept_task( let dst_addr = quic_conn.remote_address(); 
tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object + let auth_id = get_cert_common_name(quic_conn.clone())?; + let link = Arc::new(LinkUnicastQuic::new( quic_conn, src_addr, Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, send, recv, + auth_id.into() )); // Communicate the new link to the initial transport manager @@ -418,3 +431,36 @@ async fn accept_task( } Ok(()) } + +fn get_cert_common_name(conn: quinn::Connection) -> ZResult { + let mut auth_id = QuicAuthId { auth_value: None }; + if let Some(pi) = conn.peer_identity() { + let serv_certs = pi.downcast::>().unwrap(); + if let Some(item) = serv_certs.iter().next() { + let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); + let subject_name = cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + auth_id = QuicAuthId { + auth_value: Some(subject_name.to_string()), + }; + } + } + Ok(auth_id) +} + +#[derive(Debug, Clone)] +struct QuicAuthId { + auth_value: Option, +} +impl From for LinkAuthId { + fn from(value: QuicAuthId) -> Self { + LinkAuthIdBuilder::new() + .auth_type(LinkAuthType::Quic) + .auth_value(value.auth_value.clone()) + .build() + } +} diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index ca4efacdc6..31213f5c43 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -32,8 +32,8 @@ use tokio_util::sync::CancellationToken; use z_serial::ZSerial; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -212,6 +212,10 @@ impl LinkUnicastTrait 
for LinkUnicastSerial { fn is_streamed(&self) -> bool { false } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } impl fmt::Display for LinkUnicastSerial { diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 79812c526e..bf2e66c863 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -20,7 +20,7 @@ use tokio::{ }; use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + get_ip_interface_names, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::{ @@ -164,6 +164,9 @@ impl LinkUnicastTrait for LinkUnicastTcp { fn is_streamed(&self) -> bool { true } + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } // // WARN: This sometimes causes timeout in routing test diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 3025e3d7d7..00f7207bb0 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -47,3 +47,5 @@ zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } + +x509-parser = "0.16.0" diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 1ced1a26b1..2e40f23dae 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -21,10 +21,12 @@ use tokio::{ }; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; +//use webpki::anchor_from_trusted_cert; +use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, 
LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkAuthId, LinkAuthIdBuilder, LinkAuthType, LinkManagerUnicastTrait, + LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -37,6 +39,10 @@ use crate::{ TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, }; +#[derive(Default, Debug, PartialEq, Eq, Hash)] +pub struct TlsCommonName(String); + +//impl pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library // NOTE: TlsStream requires &mut for read and write operations. This means @@ -56,6 +62,7 @@ pub struct LinkUnicastTls { // Make sure there are no concurrent read or writes write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, + auth_identifier: LinkAuthId, } unsafe impl Send for LinkUnicastTls {} @@ -66,6 +73,7 @@ impl LinkUnicastTls { socket: TlsStream, src_addr: SocketAddr, dst_addr: SocketAddr, + auth_identifier: LinkAuthId, ) -> LinkUnicastTls { let (tcp_stream, _) = socket.get_ref(); // Set the TLS nodelay option @@ -99,6 +107,7 @@ impl LinkUnicastTls { dst_locator: Locator::new(TLS_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), + auth_identifier, } } @@ -189,6 +198,10 @@ impl LinkUnicastTrait for LinkUnicastTls { fn is_streamed(&self) -> bool { true } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + self.auth_identifier.clone() + } } impl Drop for LinkUnicastTls { @@ -282,9 +295,19 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { e ) })?; + + let (_, tls_conn) = tls_stream.get_ref(); + + let auth_identifier = get_server_cert_common_name(tls_conn)?; + let tls_stream = TlsStream::Client(tls_stream); - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + 
dst_addr, + auth_identifier.into(), + )); Ok(LinkUnicast(link)) } @@ -384,8 +407,16 @@ async fn accept_task( }; tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); - // Create the new link object - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let (_, tls_conn) = tls_stream.get_ref(); + let auth_identifier = get_client_cert_common_name(tls_conn)?; + tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); + // Create the new link object + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + auth_identifier.into(), + )); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { @@ -409,3 +440,55 @@ async fn accept_task( Ok(()) } + +fn get_client_cert_common_name(tls_conn: &rustls::CommonState) -> ZResult { + if let Some(serv_certs) = tls_conn.peer_certificates() { + let (_, cert) = X509Certificate::from_der(serv_certs[0].as_ref())?; + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + + Ok(TlsAuthId { + auth_value: Some(subject_name.to_string()), + }) + } else { + Ok(TlsAuthId { auth_value: None }) + } +} + +fn get_server_cert_common_name(tls_conn: &rustls::ClientConnection) -> ZResult { + let serv_certs = tls_conn.peer_certificates().unwrap(); + let mut auth_id = TlsAuthId { auth_value: None }; + + //need the first certificate in the chain os no need for looping + if let Some(item) = serv_certs.iter().next() { + let (_, cert) = X509Certificate::from_der(item.as_ref())?; + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + + auth_id = TlsAuthId { + auth_value: Some(subject_name.to_string()), + }; + return Ok(auth_id); + } + Ok(auth_id) +} + +struct TlsAuthId { + auth_value: Option, +} +impl From for LinkAuthId { + fn from(value: TlsAuthId) -> Self { + 
LinkAuthIdBuilder::new() + .auth_type(LinkAuthType::Tls) + .auth_value(value.auth_value.clone()) + .build() + } +} diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 79f980ca96..760ed2209c 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -24,8 +24,8 @@ use tokio::{net::UdpSocket, sync::Mutex as AsyncMutex}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ - get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, - LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, + get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, + LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -224,6 +224,10 @@ impl LinkUnicastTrait for LinkUnicastUdp { fn is_streamed(&self) -> bool { false } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } impl fmt::Display for LinkUnicastUdp { diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 1b30ceb553..7dea524ca1 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -36,8 +36,8 @@ use tokio_util::sync::CancellationToken; use unix_named_pipe::{create, open_write}; use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -525,6 +525,10 @@ impl 
LinkUnicastTrait for UnicastPipe { fn is_streamed(&self) -> bool { true } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } impl fmt::Display for UnicastPipe { diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index cc7147c9e0..7adbb3ab30 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -27,7 +27,7 @@ use tokio_util::sync::CancellationToken; use uuid::Uuid; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -143,6 +143,10 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { fn is_streamed(&self) -> bool { true } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } impl Drop for LinkUnicastUnixSocketStream { diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 605f114173..32b292ca7e 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -28,7 +28,7 @@ use tokio_vsock::{ }; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{endpoint::Address, EndPoint, Locator}, @@ -189,6 +189,10 @@ impl LinkUnicastTrait for LinkUnicastVsock { fn is_streamed(&self) -> bool { true } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } impl fmt::Display for LinkUnicastVsock { diff --git 
a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index b671bf67f2..336e8af975 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -34,7 +34,7 @@ use tokio_tungstenite::{accept_async, tungstenite::Message, MaybeTlsStream, WebS use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -226,6 +226,10 @@ impl LinkUnicastTrait for LinkUnicastWs { fn is_streamed(&self) -> bool { false } + #[inline(always)] + fn get_auth_identifier(&self) -> LinkAuthId { + LinkAuthId::default() + } } impl Drop for LinkUnicastWs { diff --git a/io/zenoh-transport/src/unicast/authentication.rs b/io/zenoh-transport/src/unicast/authentication.rs new file mode 100644 index 0000000000..b66289983e --- /dev/null +++ b/io/zenoh-transport/src/unicast/authentication.rs @@ -0,0 +1,43 @@ +use zenoh_link::{LinkAuthId, LinkAuthType}; + +#[cfg(feature = "auth_usrpwd")] +use super::establishment::ext::auth::UsrPwdId; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum AuthId { + CertCommonName(String), + Username(String), + None, +} + +impl From for AuthId { + fn from(lid: LinkAuthId) -> Self { + match (lid.get_type(), lid.get_value()) { + (LinkAuthType::Tls | LinkAuthType::Quic, Some(auth_value)) => { + AuthId::CertCommonName(auth_value.clone()) + } + _ => AuthId::None, + } + } +} + +#[cfg(feature = "auth_usrpwd")] +impl From for AuthId { + fn from(user_password_id: UsrPwdId) -> Self { + // pub(crate) struct UsrPwdId(pub Option>); + match user_password_id.0 { + Some(username) => { + //do something + //convert username from vecu8 to string + match std::str::from_utf8(&username) { + Ok(name) => 
AuthId::Username(name.to_owned()), + Err(e) => { + tracing::error!("Error in extracting username {}", e); + AuthId::None + } + } + } + None => AuthId::None, + } + } +} diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index d074ea9642..9a7151252d 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -31,6 +31,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +#[cfg(feature = "auth_usrpwd")] +use super::ext::auth::UsrPwdId; #[cfg(feature = "shared-memory")] use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] @@ -111,6 +113,8 @@ struct RecvOpenSynOut { other_whatami: WhatAmI, other_lease: Duration, other_initial_sn: TransportSn, + #[cfg(feature = "auth_usrpwd")] + other_auth_id: UsrPwdId, } // OpenAck @@ -486,11 +490,18 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { } // Extension Auth - #[cfg(feature = "transport_auth")] - self.ext_auth - .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + #[allow(unused_mut, unused_assignments)] + #[cfg(feature = "auth_usrpwd")] + let mut user_password_id = UsrPwdId(None); + + #[cfg(feature = "auth_usrpwd")] + { + user_password_id = self + .ext_auth + .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + } // Extension MultiLink #[cfg(feature = "transport_multilink")] @@ -517,6 +528,8 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { other_whatami: cookie.whatami, other_lease: open_syn.lease, other_initial_sn: open_syn.initial_sn, + #[cfg(feature = "auth_usrpwd")] + other_auth_id: user_password_id, }; Ok((state, output)) } @@ -711,7 +724,6 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - cookie_nonce: iack_out.cookie_nonce, }; let (mut state, osyn_out) = 
step!(fsm.recv_open_syn(osyn_in).await); - // Create the OpenAck but not send it yet let oack_in = SendOpenAckIn { mine_zid: manager.config.zid, @@ -735,6 +747,8 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - false => None, }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), + #[cfg(feature = "auth_usrpwd")] + auth_id: osyn_out.other_auth_id, }; let a_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index 8d57434bc3..0bc46c6edc 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -571,7 +571,12 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); + + #[cfg(not(feature = "auth_usrpwd"))] type RecvOpenSynOut = (); + #[cfg(feature = "auth_usrpwd")] + type RecvOpenSynOut = UsrPwdId; + async fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -604,13 +609,17 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { match (self.usrpwd.as_ref(), state.usrpwd.as_mut()) { (Some(e), Some(s)) => { let x = ztake!(exts, id::USRPWD); - e.recv_open_syn((s, ztryinto!(x, S))).await?; + let username = e.recv_open_syn((s, ztryinto!(x, S))).await?; + let user_passwd_id = UsrPwdId(Some(username)); + return Ok(user_passwd_id); + } + (None, None) => { + return Ok(UsrPwdId(None)); } - (None, None) => {} _ => bail!("{S} Invalid UsrPwd configuration."), } } - + #[cfg(not(feature = "auth_usrpwd"))] Ok(()) } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index be24337fad..22d7a86817 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -162,6 +162,8 @@ impl StateOpen { pub(crate) struct StateAccept { nonce: 
u64, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct UsrPwdId(pub Option>); impl StateAccept { pub(crate) fn new(prng: &mut R) -> Self @@ -406,7 +408,7 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); - type RecvOpenSynOut = (); + type RecvOpenSynOut = Vec; //value of userid is returned if recvopensynout is processed as valid async fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -436,8 +438,8 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { if hmac != open_syn.hmac { bail!("{S} Invalid password."); } - - Ok(()) + let username = open_syn.user.to_owned(); + Ok(username) } type SendOpenAckIn = &'a StateAccept; diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 49c57d9e9a..2d50d465bf 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -32,6 +32,8 @@ use zenoh_result::ZResult; use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] use crate::shm::TransportShmConfig; +#[cfg(feature = "auth_usrpwd")] +use crate::unicast::establishment::ext::auth::UsrPwdId; use crate::{ common::batch::BatchConfig, unicast::{ @@ -644,6 +646,8 @@ pub(crate) async fn open_link( false => None, }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), + #[cfg(feature = "auth_usrpwd")] + auth_id: UsrPwdId(None), }; let o_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 9c46b55174..abffb665b7 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -32,6 +32,7 @@ use zenoh_result::{zerror, ZResult}; use crate::stats::TransportStats; use crate::{ unicast::{ + authentication::AuthId, link::{LinkUnicastWithOpenAck, TransportLinkUnicast}, 
transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, TransportConfigUnicast, @@ -187,6 +188,10 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { self.config.zid } + fn get_auth_ids(&self) -> Vec { + vec![] + } + fn get_whatami(&self) -> WhatAmI { self.config.whatami } diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 1726ba2559..973d0bf09a 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -11,15 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod authentication; pub mod establishment; pub(crate) mod link; pub(crate) mod lowlatency; pub(crate) mod manager; -pub(crate) mod transport_unicast_inner; -pub(crate) mod universal; - #[cfg(feature = "test")] pub mod test_helpers; +pub(crate) mod transport_unicast_inner; +pub(crate) mod universal; use std::{ fmt, @@ -42,6 +42,9 @@ use self::transport_unicast_inner::TransportUnicastTrait; use super::{TransportPeer, TransportPeerEventHandler}; #[cfg(feature = "shared-memory")] use crate::shm::TransportShmConfig; +use crate::unicast::authentication::AuthId; +#[cfg(feature = "auth_usrpwd")] +use crate::unicast::establishment::ext::auth::UsrPwdId; /*************************************/ /* TRANSPORT UNICAST */ @@ -58,6 +61,8 @@ pub(crate) struct TransportConfigUnicast { #[cfg(feature = "shared-memory")] pub(crate) shm: Option, pub(crate) is_lowlatency: bool, + #[cfg(feature = "auth_usrpwd")] + pub(crate) auth_id: UsrPwdId, } /// [`TransportUnicast`] is the transport handler returned @@ -117,6 +122,11 @@ impl TransportUnicast { Ok(transport.get_links()) } + pub fn get_auth_ids(&self) -> ZResult> { + let transport = self.get_inner()?; + Ok(transport.get_auth_ids()) + } + #[inline(always)] pub fn schedule(&self, message: NetworkMessage) -> ZResult<()> { let transport = self.get_inner()?; diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs 
b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index c687a6aa16..bc0c34b7e8 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -56,6 +56,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; fn get_links(&self) -> Vec; + fn get_auth_ids(&self) -> Vec; #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool; fn is_qos(&self) -> bool; diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 538756f6ee..e7b0d52458 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -28,6 +28,7 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::super::authentication::AuthId; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -381,6 +382,18 @@ impl TransportUnicastTrait for TransportUnicastUniversal { zread!(self.links).iter().map(|l| l.link.link()).collect() } + fn get_auth_ids(&self) -> Vec { + //convert link level auth ids to AuthId + #[allow(unused_mut)] + let mut auth_ids: Vec = zread!(self.links) + .iter() + .map(|l| l.link.link().auth_identifier.into()) + .collect(); + // convert usrpwd auth id to AuthId + #[cfg(feature = "auth_usrpwd")] + auth_ids.push(self.config.auth_id.clone().into()); + auth_ids + } /*************************************/ /* TX */ /*************************************/ diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index fe78ce8aed..885752e2c6 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -26,7 +26,10 @@ use zenoh_protocol::{ zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use 
zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{authentication::AuthId, TransportUnicast}, +}; use super::{ authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, @@ -37,18 +40,19 @@ pub struct AclEnforcer { enforcer: Arc, } #[derive(Clone, Debug)] -pub struct Interface { +pub struct AuthSubject { id: usize, - name: String, + name: String, //make Subject } + struct EgressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, + subject: Vec, zid: ZenohId, } struct IngressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, + subject: Vec, zid: ZenohId, } @@ -80,9 +84,29 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { + let mut authn_ids = vec![]; + if let Ok(ids) = transport.get_auth_ids() { + let enforcer = self.enforcer.clone(); + for auth_id in ids { + match auth_id { + AuthId::CertCommonName(name) => { + let subject = &Subject::CertCommonName(name.clone()); + if let Some(val) = enforcer.subject_map.get(subject) { + authn_ids.push(AuthSubject { id: *val, name }); + } + } + AuthId::Username(name) => { + let subject = &Subject::Username(name.clone()); + if let Some(val) = enforcer.subject_map.get(subject) { + authn_ids.push(AuthSubject { id: *val, name }); + } + } + AuthId::None => {} + } + } + } match transport.get_zid() { Ok(zid) => { - let mut interface_list: Vec = Vec::new(); match transport.get_links() { Ok(links) => { for link in links { @@ -90,7 +114,7 @@ impl InterceptorFactoryTrait for AclEnforcer { for face in link.interfaces { let subject = &Subject::Interface(face.clone()); if let Some(val) = enforcer.subject_map.get(subject) { - interface_list.push(Interface { + authn_ids.push(AuthSubject { id: *val, name: face, }); @@ -105,13 +129,13 @@ impl InterceptorFactoryTrait for AclEnforcer { } let ingress_interceptor = Box::new(IngressAclEnforcer { 
policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), zid, + subject: authn_ids.clone(), }); let egress_interceptor = Box::new(EgressAclEnforcer { policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), zid, + subject: authn_ids, }); match ( self.enforcer.interface_enabled.ingress, @@ -282,15 +306,15 @@ impl InterceptorTrait for EgressAclEnforcer { } pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; - fn interface_list(&self) -> Vec; fn zid(&self) -> ZenohId; fn flow(&self) -> InterceptorFlow; + fn authn_ids(&self) -> Vec; fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); - let interface_list = self.interface_list(); + let authn_ids: Vec = self.authn_ids(); let zid = self.zid(); let mut decision = policy_enforcer.default_permission; - for subject in &interface_list { + for subject in &authn_ids { match policy_enforcer.policy_decision_point(subject.id, self.flow(), action, key_expr) { Ok(Permission::Allow) => { tracing::trace!( @@ -336,32 +360,28 @@ impl AclActionMethods for EgressAclEnforcer { fn policy_enforcer(&self) -> Arc { self.policy_enforcer.clone() } - - fn interface_list(&self) -> Vec { - self.interface_list.clone() - } - fn zid(&self) -> ZenohId { self.zid } fn flow(&self) -> InterceptorFlow { InterceptorFlow::Egress } + fn authn_ids(&self) -> Vec { + self.subject.clone() + } } impl AclActionMethods for IngressAclEnforcer { fn policy_enforcer(&self) -> Arc { self.policy_enforcer.clone() } - - fn interface_list(&self) -> Vec { - self.interface_list.clone() - } - fn zid(&self) -> ZenohId { self.zid } fn flow(&self) -> InterceptorFlow { InterceptorFlow::Ingress } + fn authn_ids(&self) -> Vec { + self.subject.clone() + } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 4ff36b1ce3..78185c9405 100644 --- 
a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -177,6 +177,20 @@ impl PolicyEnforcer { ); } } + match rule.usernames { + Some(_) => (), + None => { + tracing::warn!("ACL config usernames list is empty. Applying rule #{} to all usernames", rule_offset); + rule.usernames = Some(Vec::new()); + } + } + match rule.cert_common_names { + Some(_) => (), + None => { + tracing::warn!("ACL config cert_common_names list is empty. Applying rule #{} to all certificate common names", rule_offset); + rule.cert_common_names = Some(Vec::new()); + } + } } let policy_information = self.policy_information_point(&rules)?; let subject_map = policy_information.subject_map; @@ -229,9 +243,7 @@ impl PolicyEnforcer { for config_rule in config_rule_set { // config validation let mut validation_err = String::new(); - if config_rule.interfaces.as_ref().unwrap().is_empty() { - validation_err.push_str("ACL config interfaces list is empty. "); - } + if config_rule.actions.is_empty() { validation_err.push_str("ACL config actions list is empty. "); } @@ -244,6 +256,28 @@ impl PolicyEnforcer { if !validation_err.is_empty() { bail!("{}", validation_err); } + + //for when at least one is not empty + let mut subject_validation_err: usize = 0; + validation_err = String::new(); + + if config_rule.interfaces.as_ref().unwrap().is_empty() { + subject_validation_err += 1; + validation_err.push_str("ACL config interfaces list is empty. "); + } + if config_rule.cert_common_names.as_ref().unwrap().is_empty() { + subject_validation_err += 1; + validation_err.push_str("ACL config certificate common names list is empty. "); + } + if config_rule.usernames.as_ref().unwrap().is_empty() { + subject_validation_err += 1; + validation_err.push_str("ACL config usernames list is empty. 
"); + } + + if subject_validation_err == 3 { + bail!("{}", validation_err); + } + for subject in config_rule.interfaces.as_ref().unwrap() { if subject.trim().is_empty() { bail!("found an empty interface value in interfaces list"); @@ -265,6 +299,48 @@ impl PolicyEnforcer { } } } + for subject in config_rule.cert_common_names.as_ref().unwrap() { + if subject.trim().is_empty() { + bail!("found an empty value in certificate common names list"); + } + for flow in config_rule.flows.as_ref().unwrap() { + for action in &config_rule.actions { + for key_expr in &config_rule.key_exprs { + if key_expr.trim().is_empty() { + bail!("found an empty key-expression value in key_exprs list"); + } + policy_rules.push(PolicyRule { + subject: Subject::CertCommonName(subject.clone()), + key_expr: key_expr.clone(), + action: *action, + permission: config_rule.permission, + flow: *flow, + }) + } + } + } + } + for subject in config_rule.usernames.as_ref().unwrap() { + if subject.trim().is_empty() { + bail!("found an empty value in usernames list"); + } + for flow in config_rule.flows.as_ref().unwrap() { + for action in &config_rule.actions { + for key_expr in &config_rule.key_exprs { + if key_expr.trim().is_empty() { + bail!("found an empty key-expression value in key_exprs list"); + } + policy_rules.push(PolicyRule { + subject: Subject::Username(subject.clone()), + key_expr: key_expr.clone(), + action: *action, + permission: config_rule.permission, + flow: *flow, + }) + } + } + } + } } let mut subject_map = SubjectMap::default(); let mut counter = 1; @@ -293,6 +369,9 @@ impl PolicyEnforcer { key_expr: &str, ) -> ZResult { let policy_map = &self.policy_map; + if policy_map.is_empty() { + return Ok(self.default_permission); + } match policy_map.get(&subject) { Some(single_policy) => { let deny_result = single_policy diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index b78a9ac888..3aed0e6541 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -47,7 +47,7 @@ mod test { async fn 
get_basic_router_config() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; + config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); config } @@ -59,9 +59,9 @@ mod test { async fn get_client_sessions() -> (Session, Session) { println!("Opening client sessions"); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs new file mode 100644 index 0000000000..e4b15d5771 --- /dev/null +++ b/zenoh/tests/authentication.rs @@ -0,0 +1,1245 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +mod test { + use std::{ + fs, + path::Path, + sync::{Arc, Mutex}, + time::Duration, + }; + + use tokio::runtime::Handle; + use zenoh::{ + config, + config::{EndPoint, WhatAmI}, + prelude::*, + Config, Session, + }; + use zenoh_core::{zlock, ztimeout}; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication() { + zenoh_util::try_init_log_from_env(); + let path = "./tests/testfiles"; + create_new_files(path).await.unwrap(); + println!("testfiles created successfully."); + + test_pub_sub_deny_then_allow_usrpswd().await; + test_pub_sub_allow_then_deny_usrpswd().await; + test_get_qbl_allow_then_deny_usrpswd().await; + test_get_qbl_deny_then_allow_usrpswd().await; + + test_pub_sub_deny_then_allow_tls(3774).await; + test_pub_sub_allow_then_deny_tls(3775).await; + test_get_qbl_allow_then_deny_tls(3776).await; + test_get_qbl_deny_then_allow_tls(3777).await; + + test_pub_sub_deny_then_allow_quic(3774).await; + test_pub_sub_allow_then_deny_quic(3775).await; + test_get_qbl_deny_then_allow_quic(3776).await; + test_get_qbl_allow_then_deny_quic(3777).await; + + std::fs::remove_dir_all(path).unwrap(); + println!("testfiles removed successfully."); + } + + #[allow(clippy::all)] + async fn create_new_files(file_path: &str) -> std::io::Result<()> { + use std::io::prelude::*; + let ca_pem = b"-----BEGIN CERTIFICATE----- +MIIDiTCCAnGgAwIBAgIUO1x6LAlICgKs5+pYUTo4CughfKEwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTExNDM0MjNaFw0yNTAzMTExNDM0MjNaMFQxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRgwFgYDVQQDDA96 
+c190ZXN0X3Jvb3RfY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3 +pFWM+IJNsRCYHt1v/TliecppwVZV+ZHfFw9JKN9ev4K/fWHUiAOwp91MOLxbaYKd +C6dxW28YVGltoGz3kUZJZcJRQVso1jXv24Op4muOsiYXukLc4TU2F6dG1XqkLt5t +svsYAQFf1uK3//QZFVRBosJEn+jjiJ4XCvt49mnPRolp1pNKX0z31mZO6bSly6c9 +OVlJMjWpDCYSOuf6qZZ36fa9eSut2bRJIPY0QCsgnqYBTnIEhksS+3jy6Qt+QpLz +95pFdLbW/MW4XKpaDltyYkO6QrBekF6uWRlvyAHU+NqvXZ4F/3Z5l26qLuBcsLPJ +kyawkO+yNIDxORmQgMczAgMBAAGjUzBRMB0GA1UdDgQWBBThgotd9ws2ryEEaKp2 ++RMOWV8D7jAfBgNVHSMEGDAWgBThgotd9ws2ryEEaKp2+RMOWV8D7jAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA9QoPv78hGmvmqF4GZeqrOBKQB +N/H5wL7f8H6BXU/wpNo2nnWOJn3u37lT+zivAdGEv+x+GeKekcugKBCSluhBLpVb +VNXe4WwMm5FBuO2NRBN2nblTMm1kEO00nVk1/yNo4hI8mj7d4YLU62d7324osNpF +wHqu6B0/c99JeKRvODGswyff1i8rJ1jpcgk/JmHg7UQBHEIkn0cRR0f9W3Mxv6b5 +ZeowRe81neWNkC6IMiMmzA0iHGkhoUMA15qG1ZKOr1XR364LH5BfNNpzAWYwkvJs +0JFrrdw+rm+cRJWs55yiyCCs7pyg1IJkY/o8bifdCOUgIyonzffwREk3+kZR +-----END CERTIFICATE-----"; + + let client_side_pem = b"-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpkwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTkxMTMxNDhaFw0yNTAzMTkxMTMxNDhaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtj +bGllbnRfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzU2p1a +ly/1bi2TDZ8+Qlvk9/3KyHqrg2BGZUxB3Pj/lufDuYNwOHkss99wp8gzMsT28mD4 +y6X7nCgEN8WeHl+/xfLuGsWIBa1OOr6dz0qewoWFsor01cQ8+nwAKlgnz6IvHfkQ +OJZD/QYSdyn6c1AcIyS60vo4qMjyI4OVb1Dl4WpC4vCmWvDT0WjBZ5GckCnuQ8wS +wZ5MtPuMQf8kYX95ll7eBtDfEXF9Oja0l1/5SmlHuKyqDy4sIKovxtFHTqgb8PUc +yT33pUHOsBXruNBxl1MKq1outdMqcQknT6FAC+aVZ7bTlwhnH8p5Apn57g+dJYTI +9dCr1e2oK5NohhkCAwEAAaNaMFgwFgYDVR0RBA8wDYILY2xpZW50X3NpZGUwHQYD +VR0OBBYEFHDUYYfQacLj1tp49OG9NbPuL0N/MB8GA1UdIwQYMBaAFOGCi133Czav +IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQB+nFAe6QyD2AaFdgrFOyEE +MeYb97sy9p5ylhMYyU62AYsIzzpTY74wBG78qYPIw3lAYzNcN0L6T6kBQ4lu6gFm 
+XB0SqCZ2AkwvV8tTlbLkZeoO6rONeke6c8cJsxYN7NiknDvTMrkTTgiyvbCWfEVX +Htnc4j/KzSBX3UjVcbPM3L/6KwMRw050/6RCiOIPFjTOCfTGoDx5fIyBk3ch/Plw +TkH2juHxX0/aCxr8hRE1v9+pXXlGnGoKbsDMLN9Aziu6xzdT/kD7BvyoM8rh7CE5 +ae7/R4sd13cZ2WGDPimqO0z1kItMOIdiYvk4DgOg+J8hZSkKT56erafdDa2LPBE6 +-----END CERTIFICATE-----"; + + let client_side_key = b"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDM1NqdWpcv9W4t +kw2fPkJb5Pf9ysh6q4NgRmVMQdz4/5bnw7mDcDh5LLPfcKfIMzLE9vJg+Mul+5wo +BDfFnh5fv8Xy7hrFiAWtTjq+nc9KnsKFhbKK9NXEPPp8ACpYJ8+iLx35EDiWQ/0G +Encp+nNQHCMkutL6OKjI8iODlW9Q5eFqQuLwplrw09FowWeRnJAp7kPMEsGeTLT7 +jEH/JGF/eZZe3gbQ3xFxfTo2tJdf+UppR7isqg8uLCCqL8bRR06oG/D1HMk996VB +zrAV67jQcZdTCqtaLrXTKnEJJ0+hQAvmlWe205cIZx/KeQKZ+e4PnSWEyPXQq9Xt +qCuTaIYZAgMBAAECggEAAlqVVw7UEzLjtN4eX1S6tD3jvCzFBETdjgENF7TfjlR4 +lln9UyV6Xqkc+Y28vdwZwqHwW90sEPCc5ShUQD7+jBzi8FVcZSX4o7rVCbz8RXgg +1eI5EKf632YQflWNpwTxGcTnGCY/sjleil/yst6sDdD+9eR4OXQme2Wt8wyH8pLm +bf1OensGrFu3kJaPMOfP6jXnqEqkUPqmaCNW7+Ans8E+4J9oksRVPQJEuxwSjdJu +BlG50KKpl0XwZ/u/hkkj8/BlRDa62YMGJkFOwaaGUu2/0UU139XaJiMSPoL6t/BU +1H15dtW9liEtnHIssXMRzc9cg+xPgCs79ABXSZaFUQKBgQD4mH/DcEFwkZQcr08i +GUk0RE5arAqHui4eiujcPZVV6j/L7PHHmabKRPBlsndFP7KUCtvzNRmHq7JWDkpF +S36OE4e94CBYb0CIrO8OO5zl1vGAn5qa9ckefSFz9AMWW+hSuo185hFjt67BMaI0 +8CxfYDH+QY5D4JE5RhSwsOmiUQKBgQDS7qjq+MQKPHHTztyHK8IbAfEGlrBdCAjf +K1bDX2BdfbRJMZ+y8LgK5HxDPlNx2/VauBLsIyU1Zirepd8hOsbCVoK1fOq+T7bY +KdB1oqLK1Rq1sMBc26F24LBaZ3Pw5XgYEcvaOW0JFQ9Oc4VjcIXKjTNhobNOegfK +QDnw8fEtSQKBgQDrCuTh2GVHFZ3AcVCUoOvB60NaH4flRHcOkbARbHihvtWK7gC8 +A97bJ8tTnCWA5/TkXFAR54a36/K1wtUeJ38Evhp9wEdU1ftiPn/YKSzzcwLr5fu7 +v9/kX9MdWv0ASu2iKphUGwMeETG9oDwJaXvKwZ0DFOB59P3Z9RTi6qI7wQKBgQCp +uBZ6WgeDJPeBsaSHrpHUIU/KOV1WvaxFxR1evlNPZmG1sxQIat/rA8VoZbHGn3Ff +uVSgY/cAbGB6HYTXu+9JV0p8tTI8Ru+cJqjwvhe2lJmVL87X6HCWsluzoiIL5tcm +pssbn7E36ZYTTag6RsOgItUA7ZbUwiOafOsiD8o64QKBgE6nOkAfy5mbp7X+q9uD +J5y6IXpY/Oia/RwveLWFbI/aum4Nnhb6L9Y0XlrYjm4cJOchQyDR7FF6f4EuAiYb +wdxBbkxXpwXnfKCtNvMF/wZMvPVaS5HTQga8hXMrtlW6jtTJ4HmkTTB/MILAXVkJ 
+EHi+N70PcrYg6li415TGfgDz +-----END PRIVATE KEY-----"; + + let server_side_pem = b"-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpgwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTkxMTMxMDRaFw0yNTAzMTkxMTMxMDRaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtz +ZXJ2ZXJfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKw4eKzt +T1inzuEIPBaPksWyjoD9n6uJx9jAQ2wRB6rXiAsXVLRSuczdGDpb1MwAqoIi6ozw +tzDRwkr58vUNaTCswxadlAmB44JEVYKZoublHjlVj5ygr0R4R5F2T9tIV+jpqZuK +HR4dHe8PiDCiWVzWvYwOLVKXQKSeaE2Z143ukVIJ85qmNykJ066AVhgWnIYSCR9c +s7WPBdTWAW3L4yNlast9hfvxdQNDs5AtUnJKfAX+7DylPAm8V7YjU1k9AtTNPbpy +kb9X97ErsB8891MmZaGZp0J6tnuucDkk0dlowMVvi2aUCsYoKF5DgGxtyVAeLhTP +70GenaLe2uwG8fMCAwEAAaNaMFgwFgYDVR0RBA8wDYILc2VydmVyX3NpZGUwHQYD +VR0OBBYEFBKms1sOw8nM/O5SN1EZIH+LsWaPMB8GA1UdIwQYMBaAFOGCi133Czav +IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQA6H/sfm8YUn86+GwxNR9i9 +MCL7WHVRx3gS9ENK87+HtZNL2TVvhPJtupG3Pjgqi33FOHrM4rMUcWSZeCEycVgy +5cjimQLwfDljIBRQE6sem3gKf0obdWl5AlPDLTL/iKj5Su7NycrjZFYqkjZjn+58 +fe8lzHNeP/3RQTgjJ98lQI0bdzGDG1+QoxTgPEc77vgN0P4MHJYx2auz/7jYBqNJ +ko8nugIQsd4kOhmOIBUQ8aXkXFktSQIerEGB8uw5iF2cCdH/sTCvhzhxLb4IWo/O +0cAZ+Vs4FW3KUn/Y44yrVAWl1H6xdFsNXBqbzVEMzlt/RV3rH70RDCc20XhP+w+g +-----END CERTIFICATE-----"; + + let server_side_key = b"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsOHis7U9Yp87h +CDwWj5LFso6A/Z+ricfYwENsEQeq14gLF1S0UrnM3Rg6W9TMAKqCIuqM8Lcw0cJK ++fL1DWkwrMMWnZQJgeOCRFWCmaLm5R45VY+coK9EeEeRdk/bSFfo6ambih0eHR3v +D4gwollc1r2MDi1Sl0CknmhNmdeN7pFSCfOapjcpCdOugFYYFpyGEgkfXLO1jwXU +1gFty+MjZWrLfYX78XUDQ7OQLVJySnwF/uw8pTwJvFe2I1NZPQLUzT26cpG/V/ex +K7AfPPdTJmWhmadCerZ7rnA5JNHZaMDFb4tmlArGKCheQ4BsbclQHi4Uz+9Bnp2i +3trsBvHzAgMBAAECggEAUjpIS/CmkOLWYRVoczEr197QMYBnCyUm2TO7PU7IRWbR +GtKR6+MPuWPbHIoaCSlMQARhztdj8BhG1zuOKDi1/7qNDzA/rWZp9RmhZlDquamt 
+i5xxjEwgQuXW7fn6WO2qo5dlFtGT43vtfeYBlY7+cdhJ+iQOub9j6vWDQYHxrF7x +yM8xvNzomHThvLFzWXJV/nGjX5pqPraMmwJUW+MGX0YaEr6tClqsc1Kmxhs3iIUo +1JCqh3FpVu2i/mR9fdcQ0ONT/s1UHzy+1Bhmh3j2Fuk4+ZeLMfxTfFxk5U0BeMQY +sES3qmd+pG5iqPW+AmXy299G89jf5+1Q4J2Km5KOUQKBgQDidifoeknpi9hRHLLD +w/7KMMe8yYg3c3dv5p0iUQQ2pXd1lJIFQ+B2/D+hfOXhnN/iCDap89ll2LoQ2Q9L +38kQXH06HCM2q11RP0BEsZCG0CnluS+JVNnjs/ALi+yc4HSpzKPs3zXIC3dLOUbq +ov5Esa5h/RU6+NO+DH72TWTv6wKBgQDCryPKtOcLp1eqdwIBRoXdPZeUdZdnwT8+ +70DnC+YdOjFkqTbaoYE5ePa3ziGOZyTFhJbPgiwEdj9Ez1JSgqLLv5hBc4s6FigK +D7fOnn7Q7+al/kEW7+X5yoSl1bFuPCqGL1xxzxmpDY8Gf3nyZ+QGfWIenbk3nq12 +nTgINyWMGQKBgQDSrxBDxXl8EMGH/MYHQRGKs8UvSuMyi3bjoU4w/eSInno75qPO +yC5NJDJin9sSgar8E54fkSCBExdP01DayvC5CwLqDAFqvBTOIKU/A18tPP6tnRKv +lkQ8Bkxdwai47k07J4qeNa9IU/qA/mGOq2MZL6DHwvd8bMA5gFCh/rDYTwKBgAPm +gGASScK5Ao+evMKLyCjLkBrgVD026O542qMGYQDa5pxuq3Or4qvlGYRLM+7ncBwo +8OCNahZYzCGzyaFvjpVobEN7biGmyfyRngwcrsu+0q8mreUov0HG5etwoZJk0DFK +B58cGBaD+AaYTTgnDrF2l52naUuM+Uq0EahQeocZAoGBAMJEGUFyEdm1JATkNhBv +ruDzj07PCjdvq3lUJix2ZlKlabsi5V+oYxMmrUSU8Nkaxy6O+qETNRNWQeWbPQHL +IZx/qrP32PmWX0IVj3pbfKHQSpOKNGzL9xUJ/FIycZWyT3yGf24KBuJwIx7xSrRx +qNsoty1gY/y3n7SN/iMZo8lO +-----END PRIVATE KEY-----"; + + let credentials_txt = b"client1name:client1passwd +client2name:client2passwd"; + + let certs_dir = Path::new(file_path); + if !certs_dir.exists() { + fs::create_dir(certs_dir)?; + } + struct Testfile<'a> { + name: &'a str, + value: &'a [u8], + } + + let test_files = vec![ + Testfile { + name: "ca.pem", + value: ca_pem, + }, + Testfile { + name: "clientsidekey.pem", + value: client_side_key, + }, + Testfile { + name: "clientside.pem", + value: client_side_pem, + }, + Testfile { + name: "serversidekey.pem", + value: server_side_key, + }, + Testfile { + name: "serverside.pem", + value: server_side_pem, + }, + Testfile { + name: "credentials.txt", + value: credentials_txt, + }, + ]; + for test_file in test_files.iter() { + let file_path = certs_dir.join(test_file.name); + let mut file = 
fs::File::create(&file_path)?; + file.write_all(test_file.value)?; + } + + Ok(()) + } + + async fn get_basic_router_config_tls(port: u16) -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "server_private_key": "tests/testfiles/serversidekey.pem", + "server_certificate": "tests/testfiles/serverside.pem", + "root_ca_certificate": "tests/testfiles/ca.pem", + "client_auth": true, + "server_name_verification": false + }, + }, + }"#, + ) + .unwrap(); + config + } + async fn get_basic_router_config_quic(port: u16) -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "server_private_key": "tests/testfiles/serversidekey.pem", + "server_certificate": "tests/testfiles/serverside.pem", + "root_ca_certificate": "tests/testfiles/ca.pem", + "client_auth": true, + "server_name_verification": false + }, + }, + }"#, + ) + .unwrap(); + config + } + + async fn get_basic_router_config_usrpswd() -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "routername", + password: "routerpasswd", + dictionary_file: "tests/testfiles/credentials.txt", + }, + }, + }"#, + ) + .unwrap(); + config + } + async fn close_router_session(s: Session) { 
+ println!("Closing router session"); + ztimeout!(s.close()).unwrap(); + } + + async fn get_client_sessions_tls(port: u16) -> (Session, Session) { + println!("Opening client sessions"); + let mut config = config::client([format!("tls/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "root_ca_certificate": "tests/testfiles/ca.pem", + "client_private_key": "tests/testfiles/clientsidekey.pem", + "client_certificate": "tests/testfiles/clientside.pem", + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = config::client([format!("tls/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "root_ca_certificate": "tests/testfiles/ca.pem", + "client_private_key": "tests/testfiles/clientsidekey.pem", + "client_certificate": "tests/testfiles/clientside.pem", + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_quic(port: u16) -> (Session, Session) { + println!("Opening client sessions"); + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "root_ca_certificate": "tests/testfiles/ca.pem", + "client_private_key": "tests/testfiles/clientsidekey.pem", + "client_certificate": "tests/testfiles/clientside.pem", + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ 
+ "link": { + "protocols": [ + "quic" + ], + "tls": { + "root_ca_certificate": "tests/testfiles/ca.pem", + "client_private_key": "tests/testfiles/clientsidekey.pem", + "client_certificate": "tests/testfiles/clientside.pem", + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_usrpswd() -> (Session, Session) { + println!("Opening client sessions"); + let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "client1name", + password: "client1passwd", + }, + } + }"#, + ) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "client2name", + password: "client2passwd", + }, + } + }"#, + ) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn close_sessions(s01: Session, s02: Session) { + println!("Closing client sessions"); + ztimeout!(s01.close()).unwrap(); + ztimeout!(s02.close()).unwrap(); + } + + async fn test_pub_sub_deny_then_allow_tls(port: u16) { + println!("test_pub_sub_deny_then_allow_tls"); + + let mut config_router = get_basic_router_config_tls(port).await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": false, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["ingress","egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (sub_session, pub_session) = get_client_sessions_tls(port).await; + { + let publisher = 
pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_allow_then_deny_tls(port: u16) { + println!("test_pub_sub_allow_then_deny_tls"); + let mut config_router = get_basic_router_config_tls(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_tls(port).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + 
ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_deny_then_allow_tls(port: u16) { + println!("test_get_qbl_deny_then_allow_tls"); + + let mut config_router = get_basic_router_config_tls(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_tls(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_allow_then_deny_tls(port: u16) { + println!("test_get_qbl_allow_then_deny_tls"); + + let mut config_router = get_basic_router_config_tls(port).await; + config_router + .insert_json5( + "access_control", + 
r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_tls(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_deny_then_allow_quic(port: u16) { + println!("test_pub_sub_deny_then_allow_quic"); + + let mut config_router = get_basic_router_config_quic(port).await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": false, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["ingress","egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = 
ztimeout!(zenoh::open(config_router)).unwrap(); + + let (sub_session, pub_session) = get_client_sessions_quic(port).await; + { + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + #[allow(unused)] + async fn test_pub_sub_allow_then_deny_quic(port: u16) { + println!("test_pub_sub_allow_then_deny_quic"); + + let mut config_router = get_basic_router_config_quic(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_quic(port).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + 
tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + #[allow(unused)] + async fn test_get_qbl_deny_then_allow_quic(port: u16) { + println!("test_get_qbl_deny_then_allow_quic"); + + let mut config_router = get_basic_router_config_quic(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable"], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_quic(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + #[allow(unused)] + async fn 
test_get_qbl_allow_then_deny_quic(port: u16) { + println!("test_get_qbl_allow_then_deny_quic"); + + let mut config_router = get_basic_router_config_quic(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": + [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_quic(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_deny_then_allow_usrpswd() { + println!("test_pub_sub_deny_then_allow_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": false, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["ingress","egress"], + "actions": [ + "put", + 
"declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (sub_session, pub_session) = get_client_sessions_usrpswd().await; + { + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_allow_then_deny_usrpswd() { + println!("test_pub_sub_allow_then_deny_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_usrpswd().await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + 
.declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_deny_then_allow_usrpswd() { + println!("test_get_qbl_deny_then_allow_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_usrpswd().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + 
ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_allow_then_deny_usrpswd() { + println!("test_get_qbl_allow_then_deny_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_usrpswd().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } +} From 382658a1e3d117bdb264a6d6aa09ecea6f2d87be Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Fri, 31 May 2024 15:44:09 +0200 Subject: [PATCH 348/598] fix: remove `zenoh::scouting::WhatAmI` reexport (#1062) `WhatAmI` is already present 
in `zenoh::config`. --- examples/src/lib.rs | 6 +++--- zenoh-ext/examples/src/lib.rs | 6 +++--- zenoh/src/lib.rs | 2 -- zenohd/src/main.rs | 3 +-- 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/examples/src/lib.rs b/examples/src/lib.rs index 0be3809cf2..1ab27dfc8f 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -49,9 +49,9 @@ impl From<&CommonArgs> for Config { None => Config::default(), }; match value.mode { - Some(Wai::Peer) => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)), - Some(Wai::Client) => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)), - Some(Wai::Router) => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)), + Some(Wai::Peer) => config.set_mode(Some(zenoh::config::WhatAmI::Peer)), + Some(Wai::Client) => config.set_mode(Some(zenoh::config::WhatAmI::Client)), + Some(Wai::Router) => config.set_mode(Some(zenoh::config::WhatAmI::Router)), None => Ok(None), } .unwrap(); diff --git a/zenoh-ext/examples/src/lib.rs b/zenoh-ext/examples/src/lib.rs index 4c203d5cff..b3e675b046 100644 --- a/zenoh-ext/examples/src/lib.rs +++ b/zenoh-ext/examples/src/lib.rs @@ -43,9 +43,9 @@ impl From<&CommonArgs> for Config { None => Config::default(), }; match value.mode { - Some(Wai::Peer) => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)), - Some(Wai::Client) => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)), - Some(Wai::Router) => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)), + Some(Wai::Peer) => config.set_mode(Some(zenoh::config::WhatAmI::Peer)), + Some(Wai::Client) => config.set_mode(Some(zenoh::config::WhatAmI::Client)), + Some(Wai::Router) => config.set_mode(Some(zenoh::config::WhatAmI::Router)), None => Ok(None), } .unwrap(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c033c7feee..5f4a6c050a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -321,8 +321,6 @@ pub mod handlers { /// Scouting primitives pub mod scouting { - /// Constants and helpers for zenoh `whatami` flags. 
- pub use zenoh_protocol::core::WhatAmI; /// A zenoh Hello message. pub use zenoh_protocol::scouting::Hello; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 229352e5db..123f6fc656 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -18,9 +18,8 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilte #[cfg(feature = "loki")] use url::Url; use zenoh::{ - config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap}, + config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap, WhatAmI}, core::Result, - scouting::WhatAmI, }; #[cfg(feature = "loki")] From 9bcfdc78ba73957cb49fa475e9192221c75baa58 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 31 May 2024 15:52:30 +0200 Subject: [PATCH 349/598] Revert "Adding TLS authentication (#840)" (#1072) This reverts commit a448215cc60e77b50cb80ae9d1305bb250af9c7f. --- .gitignore | 2 - Cargo.lock | 113 -- commons/zenoh-config/src/lib.rs | 4 - io/zenoh-link-commons/src/lib.rs | 3 - io/zenoh-link-commons/src/unicast.rs | 68 - io/zenoh-links/zenoh-link-quic/Cargo.toml | 7 +- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 50 +- .../zenoh-link-serial/src/unicast.rs | 8 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 5 +- io/zenoh-links/zenoh-link-tls/Cargo.toml | 2 - io/zenoh-links/zenoh-link-tls/src/unicast.rs | 93 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 8 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 8 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 6 +- .../zenoh-link-vsock/src/unicast.rs | 6 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 6 +- .../src/unicast/authentication.rs | 43 - .../src/unicast/establishment/accept.rs | 26 +- .../src/unicast/establishment/ext/auth/mod.rs | 15 +- .../unicast/establishment/ext/auth/usrpwd.rs | 8 +- .../src/unicast/establishment/open.rs | 4 - .../src/unicast/lowlatency/transport.rs | 5 - io/zenoh-transport/src/unicast/mod.rs | 16 +- .../src/unicast/transport_unicast_inner.rs | 1 - 
.../src/unicast/universal/transport.rs | 13 - .../net/routing/interceptor/access_control.rs | 64 +- .../net/routing/interceptor/authorization.rs | 85 +- zenoh/tests/acl.rs | 6 +- zenoh/tests/authentication.rs | 1245 ----------------- 29 files changed, 62 insertions(+), 1858 deletions(-) delete mode 100644 io/zenoh-transport/src/unicast/authentication.rs delete mode 100644 zenoh/tests/authentication.rs diff --git a/.gitignore b/.gitignore index bf5a1656d3..105dae1aa7 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,4 @@ cargo-timing*.html -#ignore test data -testfiles ci/valgrind-check/*.log diff --git a/Cargo.lock b/Cargo.lock index 91ad98ce8c..aff6c4950a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -235,45 +235,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" -[[package]] -name = "asn1-rs" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" -dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.28", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", - "synstructure", -] - -[[package]] -name = "asn1-rs-impl" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - [[package]] name = "async-attributes" version = "1.1.2" @@ -1043,20 +1004,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der-parser" -version = "9.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" -dependencies = [ - "asn1-rs", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "deranged" version = "0.3.8" @@ -1135,17 +1082,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" -[[package]] -name = "displaydoc" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - [[package]] name = "dyn-clone" version = "1.0.13" @@ -2369,15 +2305,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" -dependencies = [ - "asn1-rs", -] - [[package]] name = "once_cell" version = "1.19.0" @@ -3222,15 +3149,6 @@ dependencies = [ "semver 1.0.18", ] -[[package]] -name = "rusticata-macros" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" -dependencies = [ - "nom", -] - [[package]] name = "rustix" version = "0.37.25" @@ -4033,17 +3951,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - [[package]] name = "system-configuration" version = "0.5.1" @@ 
-4166,7 +4073,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" dependencies = [ "deranged", - "itoa", "serde", "time-core", "time-macros 0.2.14", @@ -5142,23 +5048,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "x509-parser" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" -dependencies = [ - "asn1-rs", - "data-encoding", - "der-parser", - "lazy_static", - "nom", - "oid-registry", - "rusticata-macros", - "thiserror", - "time 0.3.28", -] - [[package]] name = "yasna" version = "0.5.2" @@ -5461,7 +5350,6 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", - "x509-parser", "zenoh-collections", "zenoh-config", "zenoh-core", @@ -5528,7 +5416,6 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", - "x509-parser", "zenoh-collections", "zenoh-config", "zenoh-core", diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 97c72ce579..e57660800f 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -104,8 +104,6 @@ pub struct DownsamplingItemConf { #[derive(Serialize, Debug, Deserialize, Clone)] pub struct AclConfigRules { pub interfaces: Option>, - pub cert_common_names: Option>, - pub usernames: Option>, pub key_exprs: Vec, pub actions: Vec, pub flows: Option>, @@ -126,8 +124,6 @@ pub struct PolicyRule { #[serde(rename_all = "snake_case")] pub enum Subject { Interface(String), - CertCommonName(String), - Username(String), } #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 6b2ec14c69..5a41050e94 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -50,7 +50,6 @@ pub struct Link { pub is_reliable: bool, pub is_streamed: 
bool, pub interfaces: Vec, - pub auth_identifier: LinkAuthId, } #[async_trait] @@ -79,7 +78,6 @@ impl From<&LinkUnicast> for Link { is_reliable: link.is_reliable(), is_streamed: link.is_streamed(), interfaces: link.get_interface_names(), - auth_identifier: link.get_auth_identifier(), } } } @@ -100,7 +98,6 @@ impl From<&LinkMulticast> for Link { is_reliable: link.is_reliable(), is_streamed: false, interfaces: vec![], - auth_identifier: LinkAuthId::default(), } } } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index cd8c550503..add4c3a27b 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -20,7 +20,6 @@ use core::{ use std::net::SocketAddr; use async_trait::async_trait; -use serde::Serialize; use zenoh_protocol::{ core::{EndPoint, Locator}, transport::BatchSize, @@ -52,7 +51,6 @@ pub trait LinkUnicastTrait: Send + Sync { fn is_reliable(&self) -> bool; fn is_streamed(&self) -> bool; fn get_interface_names(&self) -> Vec; - fn get_auth_identifier(&self) -> LinkAuthId; async fn write(&self, buffer: &[u8]) -> ZResult; async fn write_all(&self, buffer: &[u8]) -> ZResult<()>; async fn read(&self, buffer: &mut [u8]) -> ZResult; @@ -120,69 +118,3 @@ pub fn get_ip_interface_names(addr: &SocketAddr) -> Vec { } } } -#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] - -pub enum LinkAuthType { - Tls, - Quic, - None, -} -#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] - -pub struct LinkAuthId { - auth_type: LinkAuthType, - auth_value: Option, -} - -impl LinkAuthId { - pub fn get_type(&self) -> &LinkAuthType { - &self.auth_type - } - pub fn get_value(&self) -> &Option { - &self.auth_value - } -} -impl Default for LinkAuthId { - fn default() -> Self { - LinkAuthId { - auth_type: LinkAuthType::None, - auth_value: None, - } - } -} - -#[derive(Debug)] -pub struct LinkAuthIdBuilder { - pub auth_type: LinkAuthType, //HAS to be provided when building - pub auth_value: Option, //actual 
value added to the above type; is None for None type -} -impl Default for LinkAuthIdBuilder { - fn default() -> Self { - Self::new() - } -} - -impl LinkAuthIdBuilder { - pub fn new() -> LinkAuthIdBuilder { - LinkAuthIdBuilder { - auth_type: LinkAuthType::None, - auth_value: None, - } - } - - pub fn auth_type(&mut self, auth_type: LinkAuthType) -> &mut Self { - self.auth_type = auth_type; - self - } - pub fn auth_value(&mut self, auth_value: Option) -> &mut Self { - self.auth_value = auth_value; - self - } - - pub fn build(&self) -> LinkAuthId { - LinkAuthId { - auth_type: self.auth_type.clone(), - auth_value: self.auth_value.clone(), - } - } -} diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index e10eed71a1..63bfc1f839 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -30,14 +30,13 @@ base64 = { workspace = true } futures = { workspace = true } quinn = { workspace = true } rustls-native-certs = { workspace = true } -rustls-pki-types = { workspace = true } +rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } - secrecy = { workspace = true } tokio = { workspace = true, features = [ + "fs", "io-util", "net", - "fs", "sync", "time", ] } @@ -57,5 +56,3 @@ zenoh-util = { workspace = true } rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } tokio-rustls = "0.24.1" rustls-pemfile = { version = "1" } - -x509-parser = "0.16.0" diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index cd9cad071f..a3b2687b6f 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -22,11 +22,10 @@ use std::{ use async_trait::async_trait; use tokio::sync::Mutex as AsyncMutex; use tokio_util::sync::CancellationToken; -use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, 
LinkAuthId, LinkAuthIdBuilder, LinkAuthType, LinkManagerUnicastTrait, - LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -47,7 +46,6 @@ pub struct LinkUnicastQuic { dst_locator: Locator, send: AsyncMutex, recv: AsyncMutex, - auth_identifier: LinkAuthId, } impl LinkUnicastQuic { @@ -57,7 +55,6 @@ impl LinkUnicastQuic { dst_locator: Locator, send: quinn::SendStream, recv: quinn::RecvStream, - auth_identifier: LinkAuthId, ) -> LinkUnicastQuic { // Build the Quic object LinkUnicastQuic { @@ -67,7 +64,6 @@ impl LinkUnicastQuic { dst_locator, send: AsyncMutex::new(send), recv: AsyncMutex::new(recv), - auth_identifier, } } } @@ -160,10 +156,6 @@ impl LinkUnicastTrait for LinkUnicastQuic { fn is_streamed(&self) -> bool { true } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - self.auth_identifier.clone() - } } impl Drop for LinkUnicastQuic { @@ -262,7 +254,6 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { .open_bi() .await .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - let auth_id = get_cert_common_name(quic_conn.clone())?; let link = Arc::new(LinkUnicastQuic::new( quic_conn, @@ -270,7 +261,6 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { endpoint.into(), send, recv, - auth_id.into(), )); Ok(LinkUnicast(link)) @@ -398,15 +388,12 @@ async fn accept_task( let dst_addr = quic_conn.remote_address(); tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object - let auth_id = get_cert_common_name(quic_conn.clone())?; - let link = Arc::new(LinkUnicastQuic::new( quic_conn, src_addr, Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, send, recv, - auth_id.into() )); // Communicate the new link to the initial transport manager @@ -431,36 
+418,3 @@ async fn accept_task( } Ok(()) } - -fn get_cert_common_name(conn: quinn::Connection) -> ZResult { - let mut auth_id = QuicAuthId { auth_value: None }; - if let Some(pi) = conn.peer_identity() { - let serv_certs = pi.downcast::>().unwrap(); - if let Some(item) = serv_certs.iter().next() { - let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); - let subject_name = cert - .subject - .iter_common_name() - .next() - .and_then(|cn| cn.as_str().ok()) - .unwrap(); - auth_id = QuicAuthId { - auth_value: Some(subject_name.to_string()), - }; - } - } - Ok(auth_id) -} - -#[derive(Debug, Clone)] -struct QuicAuthId { - auth_value: Option, -} -impl From for LinkAuthId { - fn from(value: QuicAuthId) -> Self { - LinkAuthIdBuilder::new() - .auth_type(LinkAuthType::Quic) - .auth_value(value.auth_value.clone()) - .build() - } -} diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 31213f5c43..ca4efacdc6 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -32,8 +32,8 @@ use tokio_util::sync::CancellationToken; use z_serial::ZSerial; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, - LinkUnicastTrait, NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -212,10 +212,6 @@ impl LinkUnicastTrait for LinkUnicastSerial { fn is_streamed(&self) -> bool { false } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } impl fmt::Display for LinkUnicastSerial { diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index bf2e66c863..79812c526e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ 
b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -20,7 +20,7 @@ use tokio::{ }; use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ - get_ip_interface_names, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::{ @@ -164,9 +164,6 @@ impl LinkUnicastTrait for LinkUnicastTcp { fn is_streamed(&self) -> bool { true } - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } // // WARN: This sometimes causes timeout in routing test diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 00f7207bb0..3025e3d7d7 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -47,5 +47,3 @@ zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } - -x509-parser = "0.16.0" diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 2e40f23dae..1ced1a26b1 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -21,12 +21,10 @@ use tokio::{ }; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; -//use webpki::anchor_from_trusted_cert; -use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, LinkAuthId, LinkAuthIdBuilder, LinkAuthType, LinkManagerUnicastTrait, - LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -39,10 +37,6 @@ use crate::{ TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, 
TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, }; -#[derive(Default, Debug, PartialEq, Eq, Hash)] -pub struct TlsCommonName(String); - -//impl pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library // NOTE: TlsStream requires &mut for read and write operations. This means @@ -62,7 +56,6 @@ pub struct LinkUnicastTls { // Make sure there are no concurrent read or writes write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, - auth_identifier: LinkAuthId, } unsafe impl Send for LinkUnicastTls {} @@ -73,7 +66,6 @@ impl LinkUnicastTls { socket: TlsStream, src_addr: SocketAddr, dst_addr: SocketAddr, - auth_identifier: LinkAuthId, ) -> LinkUnicastTls { let (tcp_stream, _) = socket.get_ref(); // Set the TLS nodelay option @@ -107,7 +99,6 @@ impl LinkUnicastTls { dst_locator: Locator::new(TLS_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), - auth_identifier, } } @@ -198,10 +189,6 @@ impl LinkUnicastTrait for LinkUnicastTls { fn is_streamed(&self) -> bool { true } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - self.auth_identifier.clone() - } } impl Drop for LinkUnicastTls { @@ -295,19 +282,9 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { e ) })?; - - let (_, tls_conn) = tls_stream.get_ref(); - - let auth_identifier = get_server_cert_common_name(tls_conn)?; - let tls_stream = TlsStream::Client(tls_stream); - let link = Arc::new(LinkUnicastTls::new( - tls_stream, - src_addr, - dst_addr, - auth_identifier.into(), - )); + let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); Ok(LinkUnicast(link)) } @@ -407,16 +384,8 @@ async fn accept_task( }; tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); - let (_, tls_conn) = tls_stream.get_ref(); - let auth_identifier = get_client_cert_common_name(tls_conn)?; - tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); - // Create the new 
link object - let link = Arc::new(LinkUnicastTls::new( - tls_stream, - src_addr, - dst_addr, - auth_identifier.into(), - )); + // Create the new link object + let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { @@ -440,55 +409,3 @@ async fn accept_task( Ok(()) } - -fn get_client_cert_common_name(tls_conn: &rustls::CommonState) -> ZResult { - if let Some(serv_certs) = tls_conn.peer_certificates() { - let (_, cert) = X509Certificate::from_der(serv_certs[0].as_ref())?; - let subject_name = &cert - .subject - .iter_common_name() - .next() - .and_then(|cn| cn.as_str().ok()) - .unwrap(); - - Ok(TlsAuthId { - auth_value: Some(subject_name.to_string()), - }) - } else { - Ok(TlsAuthId { auth_value: None }) - } -} - -fn get_server_cert_common_name(tls_conn: &rustls::ClientConnection) -> ZResult { - let serv_certs = tls_conn.peer_certificates().unwrap(); - let mut auth_id = TlsAuthId { auth_value: None }; - - //need the first certificate in the chain os no need for looping - if let Some(item) = serv_certs.iter().next() { - let (_, cert) = X509Certificate::from_der(item.as_ref())?; - let subject_name = &cert - .subject - .iter_common_name() - .next() - .and_then(|cn| cn.as_str().ok()) - .unwrap(); - - auth_id = TlsAuthId { - auth_value: Some(subject_name.to_string()), - }; - return Ok(auth_id); - } - Ok(auth_id) -} - -struct TlsAuthId { - auth_value: Option, -} -impl From for LinkAuthId { - fn from(value: TlsAuthId) -> Self { - LinkAuthIdBuilder::new() - .auth_type(LinkAuthType::Tls) - .auth_value(value.auth_value.clone()) - .build() - } -} diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 760ed2209c..79f980ca96 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -24,8 +24,8 @@ use tokio::{net::UdpSocket, 
sync::Mutex as AsyncMutex}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ - get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, - LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, + get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -224,10 +224,6 @@ impl LinkUnicastTrait for LinkUnicastUdp { fn is_streamed(&self) -> bool { false } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } impl fmt::Display for LinkUnicastUdp { diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 7dea524ca1..1b30ceb553 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -36,8 +36,8 @@ use tokio_util::sync::CancellationToken; use unix_named_pipe::{create, open_write}; use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, - LinkUnicastTrait, NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -525,10 +525,6 @@ impl LinkUnicastTrait for UnicastPipe { fn is_streamed(&self) -> bool { true } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } impl fmt::Display for UnicastPipe { diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 7adbb3ab30..cc7147c9e0 100644 --- 
a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -27,7 +27,7 @@ use tokio_util::sync::CancellationToken; use uuid::Uuid; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -143,10 +143,6 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { fn is_streamed(&self) -> bool { true } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } impl Drop for LinkUnicastUnixSocketStream { diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 32b292ca7e..605f114173 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -28,7 +28,7 @@ use tokio_vsock::{ }; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{endpoint::Address, EndPoint, Locator}, @@ -189,10 +189,6 @@ impl LinkUnicastTrait for LinkUnicastVsock { fn is_streamed(&self) -> bool { true } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } impl fmt::Display for LinkUnicastVsock { diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 336e8af975..b671bf67f2 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -34,7 +34,7 @@ use tokio_tungstenite::{accept_async, tungstenite::Message, MaybeTlsStream, WebS use tokio_util::sync::CancellationToken; use 
zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -226,10 +226,6 @@ impl LinkUnicastTrait for LinkUnicastWs { fn is_streamed(&self) -> bool { false } - #[inline(always)] - fn get_auth_identifier(&self) -> LinkAuthId { - LinkAuthId::default() - } } impl Drop for LinkUnicastWs { diff --git a/io/zenoh-transport/src/unicast/authentication.rs b/io/zenoh-transport/src/unicast/authentication.rs deleted file mode 100644 index b66289983e..0000000000 --- a/io/zenoh-transport/src/unicast/authentication.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zenoh_link::{LinkAuthId, LinkAuthType}; - -#[cfg(feature = "auth_usrpwd")] -use super::establishment::ext::auth::UsrPwdId; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum AuthId { - CertCommonName(String), - Username(String), - None, -} - -impl From for AuthId { - fn from(lid: LinkAuthId) -> Self { - match (lid.get_type(), lid.get_value()) { - (LinkAuthType::Tls | LinkAuthType::Quic, Some(auth_value)) => { - AuthId::CertCommonName(auth_value.clone()) - } - _ => AuthId::None, - } - } -} - -#[cfg(feature = "auth_usrpwd")] -impl From for AuthId { - fn from(user_password_id: UsrPwdId) -> Self { - // pub(crate) struct UsrPwdId(pub Option>); - match user_password_id.0 { - Some(username) => { - //do something - //convert username from vecu8 to string - match std::str::from_utf8(&username) { - Ok(name) => AuthId::Username(name.to_owned()), - Err(e) => { - tracing::error!("Error in extracting username {}", e); - AuthId::None - } - } - } - None => AuthId::None, - } - } -} diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 9a7151252d..d074ea9642 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ 
b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -31,8 +31,6 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -#[cfg(feature = "auth_usrpwd")] -use super::ext::auth::UsrPwdId; #[cfg(feature = "shared-memory")] use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] @@ -113,8 +111,6 @@ struct RecvOpenSynOut { other_whatami: WhatAmI, other_lease: Duration, other_initial_sn: TransportSn, - #[cfg(feature = "auth_usrpwd")] - other_auth_id: UsrPwdId, } // OpenAck @@ -490,18 +486,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { } // Extension Auth - #[allow(unused_mut, unused_assignments)] - #[cfg(feature = "auth_usrpwd")] - let mut user_password_id = UsrPwdId(None); - - #[cfg(feature = "auth_usrpwd")] - { - user_password_id = self - .ext_auth - .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; - } + #[cfg(feature = "transport_auth")] + self.ext_auth + .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension MultiLink #[cfg(feature = "transport_multilink")] @@ -528,8 +517,6 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { other_whatami: cookie.whatami, other_lease: open_syn.lease, other_initial_sn: open_syn.initial_sn, - #[cfg(feature = "auth_usrpwd")] - other_auth_id: user_password_id, }; Ok((state, output)) } @@ -724,6 +711,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - cookie_nonce: iack_out.cookie_nonce, }; let (mut state, osyn_out) = step!(fsm.recv_open_syn(osyn_in).await); + // Create the OpenAck but not send it yet let oack_in = SendOpenAckIn { mine_zid: manager.config.zid, @@ -747,8 +735,6 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - false => None, }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), - #[cfg(feature = "auth_usrpwd")] - auth_id: osyn_out.other_auth_id, }; let 
a_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index 0bc46c6edc..8d57434bc3 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -571,12 +571,7 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); - - #[cfg(not(feature = "auth_usrpwd"))] type RecvOpenSynOut = (); - #[cfg(feature = "auth_usrpwd")] - type RecvOpenSynOut = UsrPwdId; - async fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -609,17 +604,13 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { match (self.usrpwd.as_ref(), state.usrpwd.as_mut()) { (Some(e), Some(s)) => { let x = ztake!(exts, id::USRPWD); - let username = e.recv_open_syn((s, ztryinto!(x, S))).await?; - let user_passwd_id = UsrPwdId(Some(username)); - return Ok(user_passwd_id); - } - (None, None) => { - return Ok(UsrPwdId(None)); + e.recv_open_syn((s, ztryinto!(x, S))).await?; } + (None, None) => {} _ => bail!("{S} Invalid UsrPwd configuration."), } } - #[cfg(not(feature = "auth_usrpwd"))] + Ok(()) } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 22d7a86817..be24337fad 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -162,8 +162,6 @@ impl StateOpen { pub(crate) struct StateAccept { nonce: u64, } -#[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) struct UsrPwdId(pub Option>); impl StateAccept { pub(crate) fn new(prng: &mut R) -> Self @@ -408,7 +406,7 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); - type RecvOpenSynOut = Vec; //value of userid is returned if recvopensynout is processed as valid + type RecvOpenSynOut = (); async 
fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -438,8 +436,8 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { if hmac != open_syn.hmac { bail!("{S} Invalid password."); } - let username = open_syn.user.to_owned(); - Ok(username) + + Ok(()) } type SendOpenAckIn = &'a StateAccept; diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 2d50d465bf..49c57d9e9a 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -32,8 +32,6 @@ use zenoh_result::ZResult; use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] use crate::shm::TransportShmConfig; -#[cfg(feature = "auth_usrpwd")] -use crate::unicast::establishment::ext::auth::UsrPwdId; use crate::{ common::batch::BatchConfig, unicast::{ @@ -646,8 +644,6 @@ pub(crate) async fn open_link( false => None, }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), - #[cfg(feature = "auth_usrpwd")] - auth_id: UsrPwdId(None), }; let o_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index abffb665b7..9c46b55174 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -32,7 +32,6 @@ use zenoh_result::{zerror, ZResult}; use crate::stats::TransportStats; use crate::{ unicast::{ - authentication::AuthId, link::{LinkUnicastWithOpenAck, TransportLinkUnicast}, transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, TransportConfigUnicast, @@ -188,10 +187,6 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { self.config.zid } - fn get_auth_ids(&self) -> Vec { - vec![] - } - fn get_whatami(&self) -> WhatAmI { self.config.whatami } diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 973d0bf09a..1726ba2559 100644 --- 
a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod authentication; pub mod establishment; pub(crate) mod link; pub(crate) mod lowlatency; pub(crate) mod manager; -#[cfg(feature = "test")] -pub mod test_helpers; pub(crate) mod transport_unicast_inner; pub(crate) mod universal; +#[cfg(feature = "test")] +pub mod test_helpers; + use std::{ fmt, sync::{Arc, Weak}, @@ -42,9 +42,6 @@ use self::transport_unicast_inner::TransportUnicastTrait; use super::{TransportPeer, TransportPeerEventHandler}; #[cfg(feature = "shared-memory")] use crate::shm::TransportShmConfig; -use crate::unicast::authentication::AuthId; -#[cfg(feature = "auth_usrpwd")] -use crate::unicast::establishment::ext::auth::UsrPwdId; /*************************************/ /* TRANSPORT UNICAST */ @@ -61,8 +58,6 @@ pub(crate) struct TransportConfigUnicast { #[cfg(feature = "shared-memory")] pub(crate) shm: Option, pub(crate) is_lowlatency: bool, - #[cfg(feature = "auth_usrpwd")] - pub(crate) auth_id: UsrPwdId, } /// [`TransportUnicast`] is the transport handler returned @@ -122,11 +117,6 @@ impl TransportUnicast { Ok(transport.get_links()) } - pub fn get_auth_ids(&self) -> ZResult> { - let transport = self.get_inner()?; - Ok(transport.get_auth_ids()) - } - #[inline(always)] pub fn schedule(&self, message: NetworkMessage) -> ZResult<()> { let transport = self.get_inner()?; diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index bc0c34b7e8..c687a6aa16 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -56,7 +56,6 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; fn get_links(&self) -> Vec; - fn get_auth_ids(&self) -> Vec; #[cfg(feature = "shared-memory")] fn 
is_shm(&self) -> bool; fn is_qos(&self) -> bool; diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index e7b0d52458..538756f6ee 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -28,7 +28,6 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; -use super::super::authentication::AuthId; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -382,18 +381,6 @@ impl TransportUnicastTrait for TransportUnicastUniversal { zread!(self.links).iter().map(|l| l.link.link()).collect() } - fn get_auth_ids(&self) -> Vec { - //convert link level auth ids to AuthId - #[allow(unused_mut)] - let mut auth_ids: Vec = zread!(self.links) - .iter() - .map(|l| l.link.link().auth_identifier.into()) - .collect(); - // convert usrpwd auth id to AuthId - #[cfg(feature = "auth_usrpwd")] - auth_ids.push(self.config.auth_id.clone().into()); - auth_ids - } /*************************************/ /* TX */ /*************************************/ diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 885752e2c6..fe78ce8aed 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -26,10 +26,7 @@ use zenoh_protocol::{ zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use zenoh_transport::{ - multicast::TransportMulticast, - unicast::{authentication::AuthId, TransportUnicast}, -}; +use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; use super::{ authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, @@ -40,19 +37,18 @@ pub struct AclEnforcer { enforcer: Arc, } #[derive(Clone, Debug)] -pub struct AuthSubject { +pub struct Interface { id: usize, - name: String, //make Subject + name: String, } - struct 
EgressAclEnforcer { policy_enforcer: Arc, - subject: Vec, + interface_list: Vec, zid: ZenohId, } struct IngressAclEnforcer { policy_enforcer: Arc, - subject: Vec, + interface_list: Vec, zid: ZenohId, } @@ -84,29 +80,9 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - let mut authn_ids = vec![]; - if let Ok(ids) = transport.get_auth_ids() { - let enforcer = self.enforcer.clone(); - for auth_id in ids { - match auth_id { - AuthId::CertCommonName(name) => { - let subject = &Subject::CertCommonName(name.clone()); - if let Some(val) = enforcer.subject_map.get(subject) { - authn_ids.push(AuthSubject { id: *val, name }); - } - } - AuthId::Username(name) => { - let subject = &Subject::Username(name.clone()); - if let Some(val) = enforcer.subject_map.get(subject) { - authn_ids.push(AuthSubject { id: *val, name }); - } - } - AuthId::None => {} - } - } - } match transport.get_zid() { Ok(zid) => { + let mut interface_list: Vec = Vec::new(); match transport.get_links() { Ok(links) => { for link in links { @@ -114,7 +90,7 @@ impl InterceptorFactoryTrait for AclEnforcer { for face in link.interfaces { let subject = &Subject::Interface(face.clone()); if let Some(val) = enforcer.subject_map.get(subject) { - authn_ids.push(AuthSubject { + interface_list.push(Interface { id: *val, name: face, }); @@ -129,13 +105,13 @@ impl InterceptorFactoryTrait for AclEnforcer { } let ingress_interceptor = Box::new(IngressAclEnforcer { policy_enforcer: self.enforcer.clone(), + interface_list: interface_list.clone(), zid, - subject: authn_ids.clone(), }); let egress_interceptor = Box::new(EgressAclEnforcer { policy_enforcer: self.enforcer.clone(), + interface_list: interface_list.clone(), zid, - subject: authn_ids, }); match ( self.enforcer.interface_enabled.ingress, @@ -306,15 +282,15 @@ impl InterceptorTrait for EgressAclEnforcer { } pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; + fn interface_list(&self) -> Vec; fn 
zid(&self) -> ZenohId; fn flow(&self) -> InterceptorFlow; - fn authn_ids(&self) -> Vec; fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); - let authn_ids: Vec = self.authn_ids(); + let interface_list = self.interface_list(); let zid = self.zid(); let mut decision = policy_enforcer.default_permission; - for subject in &authn_ids { + for subject in &interface_list { match policy_enforcer.policy_decision_point(subject.id, self.flow(), action, key_expr) { Ok(Permission::Allow) => { tracing::trace!( @@ -360,28 +336,32 @@ impl AclActionMethods for EgressAclEnforcer { fn policy_enforcer(&self) -> Arc { self.policy_enforcer.clone() } + + fn interface_list(&self) -> Vec { + self.interface_list.clone() + } + fn zid(&self) -> ZenohId { self.zid } fn flow(&self) -> InterceptorFlow { InterceptorFlow::Egress } - fn authn_ids(&self) -> Vec { - self.subject.clone() - } } impl AclActionMethods for IngressAclEnforcer { fn policy_enforcer(&self) -> Arc { self.policy_enforcer.clone() } + + fn interface_list(&self) -> Vec { + self.interface_list.clone() + } + fn zid(&self) -> ZenohId { self.zid } fn flow(&self) -> InterceptorFlow { InterceptorFlow::Ingress } - fn authn_ids(&self) -> Vec { - self.subject.clone() - } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 78185c9405..4ff36b1ce3 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -177,20 +177,6 @@ impl PolicyEnforcer { ); } } - match rule.usernames { - Some(_) => (), - None => { - tracing::warn!("ACL config usernames list is empty. Applying rule #{} to all usernames", rule_offset); - rule.usernames = Some(Vec::new()); - } - } - match rule.cert_common_names { - Some(_) => (), - None => { - tracing::warn!("ACL config cert_common_names list is empty. 
Applying rule #{} to all certificate common names", rule_offset); - rule.cert_common_names = Some(Vec::new()); - } - } } let policy_information = self.policy_information_point(&rules)?; let subject_map = policy_information.subject_map; @@ -243,7 +229,9 @@ impl PolicyEnforcer { for config_rule in config_rule_set { // config validation let mut validation_err = String::new(); - + if config_rule.interfaces.as_ref().unwrap().is_empty() { + validation_err.push_str("ACL config interfaces list is empty. "); + } if config_rule.actions.is_empty() { validation_err.push_str("ACL config actions list is empty. "); } @@ -256,28 +244,6 @@ impl PolicyEnforcer { if !validation_err.is_empty() { bail!("{}", validation_err); } - - //for when at least one is not empty - let mut subject_validation_err: usize = 0; - validation_err = String::new(); - - if config_rule.interfaces.as_ref().unwrap().is_empty() { - subject_validation_err += 1; - validation_err.push_str("ACL config interfaces list is empty. "); - } - if config_rule.cert_common_names.as_ref().unwrap().is_empty() { - subject_validation_err += 1; - validation_err.push_str("ACL config certificate common names list is empty. "); - } - if config_rule.usernames.as_ref().unwrap().is_empty() { - subject_validation_err += 1; - validation_err.push_str("ACL config usernames list is empty. 
"); - } - - if subject_validation_err == 3 { - bail!("{}", validation_err); - } - for subject in config_rule.interfaces.as_ref().unwrap() { if subject.trim().is_empty() { bail!("found an empty interface value in interfaces list"); @@ -299,48 +265,6 @@ impl PolicyEnforcer { } } } - for subject in config_rule.cert_common_names.as_ref().unwrap() { - if subject.trim().is_empty() { - bail!("found an empty value in certificate common names list"); - } - for flow in config_rule.flows.as_ref().unwrap() { - for action in &config_rule.actions { - for key_expr in &config_rule.key_exprs { - if key_expr.trim().is_empty() { - bail!("found an empty key-expression value in key_exprs list"); - } - policy_rules.push(PolicyRule { - subject: Subject::CertCommonName(subject.clone()), - key_expr: key_expr.clone(), - action: *action, - permission: config_rule.permission, - flow: *flow, - }) - } - } - } - } - for subject in config_rule.usernames.as_ref().unwrap() { - if subject.trim().is_empty() { - bail!("found an empty value in usernames list"); - } - for flow in config_rule.flows.as_ref().unwrap() { - for action in &config_rule.actions { - for key_expr in &config_rule.key_exprs { - if key_expr.trim().is_empty() { - bail!("found an empty key-expression value in key_exprs list"); - } - policy_rules.push(PolicyRule { - subject: Subject::Username(subject.clone()), - key_expr: key_expr.clone(), - action: *action, - permission: config_rule.permission, - flow: *flow, - }) - } - } - } - } } let mut subject_map = SubjectMap::default(); let mut counter = 1; @@ -369,9 +293,6 @@ impl PolicyEnforcer { key_expr: &str, ) -> ZResult { let policy_map = &self.policy_map; - if policy_map.is_empty() { - return Ok(self.default_permission); - } match policy_map.get(&subject) { Some(single_policy) => { let deny_result = single_policy diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 3aed0e6541..b78a9ac888 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -47,7 +47,7 @@ mod test { async fn 
get_basic_router_config() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); config } @@ -59,9 +59,9 @@ mod test { async fn get_client_sessions() -> (Session, Session) { println!("Opening client sessions"); - let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); + let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); + let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs deleted file mode 100644 index e4b15d5771..0000000000 --- a/zenoh/tests/authentication.rs +++ /dev/null @@ -1,1245 +0,0 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -mod test { - use std::{ - fs, - path::Path, - sync::{Arc, Mutex}, - time::Duration, - }; - - use tokio::runtime::Handle; - use zenoh::{ - config, - config::{EndPoint, WhatAmI}, - prelude::*, - Config, Session, - }; - use zenoh_core::{zlock, ztimeout}; - - const TIMEOUT: Duration = Duration::from_secs(60); - const SLEEP: Duration = Duration::from_secs(1); - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn test_authentication() { - zenoh_util::try_init_log_from_env(); - let path = "./tests/testfiles"; - create_new_files(path).await.unwrap(); - println!("testfiles created successfully."); - - test_pub_sub_deny_then_allow_usrpswd().await; - test_pub_sub_allow_then_deny_usrpswd().await; - test_get_qbl_allow_then_deny_usrpswd().await; - test_get_qbl_deny_then_allow_usrpswd().await; - - test_pub_sub_deny_then_allow_tls(3774).await; - test_pub_sub_allow_then_deny_tls(3775).await; - test_get_qbl_allow_then_deny_tls(3776).await; - test_get_qbl_deny_then_allow_tls(3777).await; - - test_pub_sub_deny_then_allow_quic(3774).await; - test_pub_sub_allow_then_deny_quic(3775).await; - test_get_qbl_deny_then_allow_quic(3776).await; - test_get_qbl_allow_then_deny_quic(3777).await; - - std::fs::remove_dir_all(path).unwrap(); - println!("testfiles removed successfully."); - } - - #[allow(clippy::all)] - async fn create_new_files(file_path: &str) -> std::io::Result<()> { - use std::io::prelude::*; - let ca_pem = b"-----BEGIN CERTIFICATE----- -MIIDiTCCAnGgAwIBAgIUO1x6LAlICgKs5+pYUTo4CughfKEwDQYJKoZIhvcNAQEL -BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G -A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz -MTExNDM0MjNaFw0yNTAzMTExNDM0MjNaMFQxCzAJBgNVBAYTAkZSMQswCQYDVQQI -DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRgwFgYDVQQDDA96 
-c190ZXN0X3Jvb3RfY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3 -pFWM+IJNsRCYHt1v/TliecppwVZV+ZHfFw9JKN9ev4K/fWHUiAOwp91MOLxbaYKd -C6dxW28YVGltoGz3kUZJZcJRQVso1jXv24Op4muOsiYXukLc4TU2F6dG1XqkLt5t -svsYAQFf1uK3//QZFVRBosJEn+jjiJ4XCvt49mnPRolp1pNKX0z31mZO6bSly6c9 -OVlJMjWpDCYSOuf6qZZ36fa9eSut2bRJIPY0QCsgnqYBTnIEhksS+3jy6Qt+QpLz -95pFdLbW/MW4XKpaDltyYkO6QrBekF6uWRlvyAHU+NqvXZ4F/3Z5l26qLuBcsLPJ -kyawkO+yNIDxORmQgMczAgMBAAGjUzBRMB0GA1UdDgQWBBThgotd9ws2ryEEaKp2 -+RMOWV8D7jAfBgNVHSMEGDAWgBThgotd9ws2ryEEaKp2+RMOWV8D7jAPBgNVHRMB -Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA9QoPv78hGmvmqF4GZeqrOBKQB -N/H5wL7f8H6BXU/wpNo2nnWOJn3u37lT+zivAdGEv+x+GeKekcugKBCSluhBLpVb -VNXe4WwMm5FBuO2NRBN2nblTMm1kEO00nVk1/yNo4hI8mj7d4YLU62d7324osNpF -wHqu6B0/c99JeKRvODGswyff1i8rJ1jpcgk/JmHg7UQBHEIkn0cRR0f9W3Mxv6b5 -ZeowRe81neWNkC6IMiMmzA0iHGkhoUMA15qG1ZKOr1XR364LH5BfNNpzAWYwkvJs -0JFrrdw+rm+cRJWs55yiyCCs7pyg1IJkY/o8bifdCOUgIyonzffwREk3+kZR ------END CERTIFICATE-----"; - - let client_side_pem = b"-----BEGIN CERTIFICATE----- -MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpkwDQYJKoZIhvcNAQEL -BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G -A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz -MTkxMTMxNDhaFw0yNTAzMTkxMTMxNDhaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI -DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtj -bGllbnRfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzU2p1a -ly/1bi2TDZ8+Qlvk9/3KyHqrg2BGZUxB3Pj/lufDuYNwOHkss99wp8gzMsT28mD4 -y6X7nCgEN8WeHl+/xfLuGsWIBa1OOr6dz0qewoWFsor01cQ8+nwAKlgnz6IvHfkQ -OJZD/QYSdyn6c1AcIyS60vo4qMjyI4OVb1Dl4WpC4vCmWvDT0WjBZ5GckCnuQ8wS -wZ5MtPuMQf8kYX95ll7eBtDfEXF9Oja0l1/5SmlHuKyqDy4sIKovxtFHTqgb8PUc -yT33pUHOsBXruNBxl1MKq1outdMqcQknT6FAC+aVZ7bTlwhnH8p5Apn57g+dJYTI -9dCr1e2oK5NohhkCAwEAAaNaMFgwFgYDVR0RBA8wDYILY2xpZW50X3NpZGUwHQYD -VR0OBBYEFHDUYYfQacLj1tp49OG9NbPuL0N/MB8GA1UdIwQYMBaAFOGCi133Czav -IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQB+nFAe6QyD2AaFdgrFOyEE -MeYb97sy9p5ylhMYyU62AYsIzzpTY74wBG78qYPIw3lAYzNcN0L6T6kBQ4lu6gFm 
-XB0SqCZ2AkwvV8tTlbLkZeoO6rONeke6c8cJsxYN7NiknDvTMrkTTgiyvbCWfEVX -Htnc4j/KzSBX3UjVcbPM3L/6KwMRw050/6RCiOIPFjTOCfTGoDx5fIyBk3ch/Plw -TkH2juHxX0/aCxr8hRE1v9+pXXlGnGoKbsDMLN9Aziu6xzdT/kD7BvyoM8rh7CE5 -ae7/R4sd13cZ2WGDPimqO0z1kItMOIdiYvk4DgOg+J8hZSkKT56erafdDa2LPBE6 ------END CERTIFICATE-----"; - - let client_side_key = b"-----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDM1NqdWpcv9W4t -kw2fPkJb5Pf9ysh6q4NgRmVMQdz4/5bnw7mDcDh5LLPfcKfIMzLE9vJg+Mul+5wo -BDfFnh5fv8Xy7hrFiAWtTjq+nc9KnsKFhbKK9NXEPPp8ACpYJ8+iLx35EDiWQ/0G -Encp+nNQHCMkutL6OKjI8iODlW9Q5eFqQuLwplrw09FowWeRnJAp7kPMEsGeTLT7 -jEH/JGF/eZZe3gbQ3xFxfTo2tJdf+UppR7isqg8uLCCqL8bRR06oG/D1HMk996VB -zrAV67jQcZdTCqtaLrXTKnEJJ0+hQAvmlWe205cIZx/KeQKZ+e4PnSWEyPXQq9Xt -qCuTaIYZAgMBAAECggEAAlqVVw7UEzLjtN4eX1S6tD3jvCzFBETdjgENF7TfjlR4 -lln9UyV6Xqkc+Y28vdwZwqHwW90sEPCc5ShUQD7+jBzi8FVcZSX4o7rVCbz8RXgg -1eI5EKf632YQflWNpwTxGcTnGCY/sjleil/yst6sDdD+9eR4OXQme2Wt8wyH8pLm -bf1OensGrFu3kJaPMOfP6jXnqEqkUPqmaCNW7+Ans8E+4J9oksRVPQJEuxwSjdJu -BlG50KKpl0XwZ/u/hkkj8/BlRDa62YMGJkFOwaaGUu2/0UU139XaJiMSPoL6t/BU -1H15dtW9liEtnHIssXMRzc9cg+xPgCs79ABXSZaFUQKBgQD4mH/DcEFwkZQcr08i -GUk0RE5arAqHui4eiujcPZVV6j/L7PHHmabKRPBlsndFP7KUCtvzNRmHq7JWDkpF -S36OE4e94CBYb0CIrO8OO5zl1vGAn5qa9ckefSFz9AMWW+hSuo185hFjt67BMaI0 -8CxfYDH+QY5D4JE5RhSwsOmiUQKBgQDS7qjq+MQKPHHTztyHK8IbAfEGlrBdCAjf -K1bDX2BdfbRJMZ+y8LgK5HxDPlNx2/VauBLsIyU1Zirepd8hOsbCVoK1fOq+T7bY -KdB1oqLK1Rq1sMBc26F24LBaZ3Pw5XgYEcvaOW0JFQ9Oc4VjcIXKjTNhobNOegfK -QDnw8fEtSQKBgQDrCuTh2GVHFZ3AcVCUoOvB60NaH4flRHcOkbARbHihvtWK7gC8 -A97bJ8tTnCWA5/TkXFAR54a36/K1wtUeJ38Evhp9wEdU1ftiPn/YKSzzcwLr5fu7 -v9/kX9MdWv0ASu2iKphUGwMeETG9oDwJaXvKwZ0DFOB59P3Z9RTi6qI7wQKBgQCp -uBZ6WgeDJPeBsaSHrpHUIU/KOV1WvaxFxR1evlNPZmG1sxQIat/rA8VoZbHGn3Ff -uVSgY/cAbGB6HYTXu+9JV0p8tTI8Ru+cJqjwvhe2lJmVL87X6HCWsluzoiIL5tcm -pssbn7E36ZYTTag6RsOgItUA7ZbUwiOafOsiD8o64QKBgE6nOkAfy5mbp7X+q9uD -J5y6IXpY/Oia/RwveLWFbI/aum4Nnhb6L9Y0XlrYjm4cJOchQyDR7FF6f4EuAiYb -wdxBbkxXpwXnfKCtNvMF/wZMvPVaS5HTQga8hXMrtlW6jtTJ4HmkTTB/MILAXVkJ 
-EHi+N70PcrYg6li415TGfgDz ------END PRIVATE KEY-----"; - - let server_side_pem = b"-----BEGIN CERTIFICATE----- -MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpgwDQYJKoZIhvcNAQEL -BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G -A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz -MTkxMTMxMDRaFw0yNTAzMTkxMTMxMDRaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI -DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtz -ZXJ2ZXJfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKw4eKzt -T1inzuEIPBaPksWyjoD9n6uJx9jAQ2wRB6rXiAsXVLRSuczdGDpb1MwAqoIi6ozw -tzDRwkr58vUNaTCswxadlAmB44JEVYKZoublHjlVj5ygr0R4R5F2T9tIV+jpqZuK -HR4dHe8PiDCiWVzWvYwOLVKXQKSeaE2Z143ukVIJ85qmNykJ066AVhgWnIYSCR9c -s7WPBdTWAW3L4yNlast9hfvxdQNDs5AtUnJKfAX+7DylPAm8V7YjU1k9AtTNPbpy -kb9X97ErsB8891MmZaGZp0J6tnuucDkk0dlowMVvi2aUCsYoKF5DgGxtyVAeLhTP -70GenaLe2uwG8fMCAwEAAaNaMFgwFgYDVR0RBA8wDYILc2VydmVyX3NpZGUwHQYD -VR0OBBYEFBKms1sOw8nM/O5SN1EZIH+LsWaPMB8GA1UdIwQYMBaAFOGCi133Czav -IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQA6H/sfm8YUn86+GwxNR9i9 -MCL7WHVRx3gS9ENK87+HtZNL2TVvhPJtupG3Pjgqi33FOHrM4rMUcWSZeCEycVgy -5cjimQLwfDljIBRQE6sem3gKf0obdWl5AlPDLTL/iKj5Su7NycrjZFYqkjZjn+58 -fe8lzHNeP/3RQTgjJ98lQI0bdzGDG1+QoxTgPEc77vgN0P4MHJYx2auz/7jYBqNJ -ko8nugIQsd4kOhmOIBUQ8aXkXFktSQIerEGB8uw5iF2cCdH/sTCvhzhxLb4IWo/O -0cAZ+Vs4FW3KUn/Y44yrVAWl1H6xdFsNXBqbzVEMzlt/RV3rH70RDCc20XhP+w+g ------END CERTIFICATE-----"; - - let server_side_key = b"-----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsOHis7U9Yp87h -CDwWj5LFso6A/Z+ricfYwENsEQeq14gLF1S0UrnM3Rg6W9TMAKqCIuqM8Lcw0cJK -+fL1DWkwrMMWnZQJgeOCRFWCmaLm5R45VY+coK9EeEeRdk/bSFfo6ambih0eHR3v -D4gwollc1r2MDi1Sl0CknmhNmdeN7pFSCfOapjcpCdOugFYYFpyGEgkfXLO1jwXU -1gFty+MjZWrLfYX78XUDQ7OQLVJySnwF/uw8pTwJvFe2I1NZPQLUzT26cpG/V/ex -K7AfPPdTJmWhmadCerZ7rnA5JNHZaMDFb4tmlArGKCheQ4BsbclQHi4Uz+9Bnp2i -3trsBvHzAgMBAAECggEAUjpIS/CmkOLWYRVoczEr197QMYBnCyUm2TO7PU7IRWbR -GtKR6+MPuWPbHIoaCSlMQARhztdj8BhG1zuOKDi1/7qNDzA/rWZp9RmhZlDquamt 
-i5xxjEwgQuXW7fn6WO2qo5dlFtGT43vtfeYBlY7+cdhJ+iQOub9j6vWDQYHxrF7x -yM8xvNzomHThvLFzWXJV/nGjX5pqPraMmwJUW+MGX0YaEr6tClqsc1Kmxhs3iIUo -1JCqh3FpVu2i/mR9fdcQ0ONT/s1UHzy+1Bhmh3j2Fuk4+ZeLMfxTfFxk5U0BeMQY -sES3qmd+pG5iqPW+AmXy299G89jf5+1Q4J2Km5KOUQKBgQDidifoeknpi9hRHLLD -w/7KMMe8yYg3c3dv5p0iUQQ2pXd1lJIFQ+B2/D+hfOXhnN/iCDap89ll2LoQ2Q9L -38kQXH06HCM2q11RP0BEsZCG0CnluS+JVNnjs/ALi+yc4HSpzKPs3zXIC3dLOUbq -ov5Esa5h/RU6+NO+DH72TWTv6wKBgQDCryPKtOcLp1eqdwIBRoXdPZeUdZdnwT8+ -70DnC+YdOjFkqTbaoYE5ePa3ziGOZyTFhJbPgiwEdj9Ez1JSgqLLv5hBc4s6FigK -D7fOnn7Q7+al/kEW7+X5yoSl1bFuPCqGL1xxzxmpDY8Gf3nyZ+QGfWIenbk3nq12 -nTgINyWMGQKBgQDSrxBDxXl8EMGH/MYHQRGKs8UvSuMyi3bjoU4w/eSInno75qPO -yC5NJDJin9sSgar8E54fkSCBExdP01DayvC5CwLqDAFqvBTOIKU/A18tPP6tnRKv -lkQ8Bkxdwai47k07J4qeNa9IU/qA/mGOq2MZL6DHwvd8bMA5gFCh/rDYTwKBgAPm -gGASScK5Ao+evMKLyCjLkBrgVD026O542qMGYQDa5pxuq3Or4qvlGYRLM+7ncBwo -8OCNahZYzCGzyaFvjpVobEN7biGmyfyRngwcrsu+0q8mreUov0HG5etwoZJk0DFK -B58cGBaD+AaYTTgnDrF2l52naUuM+Uq0EahQeocZAoGBAMJEGUFyEdm1JATkNhBv -ruDzj07PCjdvq3lUJix2ZlKlabsi5V+oYxMmrUSU8Nkaxy6O+qETNRNWQeWbPQHL -IZx/qrP32PmWX0IVj3pbfKHQSpOKNGzL9xUJ/FIycZWyT3yGf24KBuJwIx7xSrRx -qNsoty1gY/y3n7SN/iMZo8lO ------END PRIVATE KEY-----"; - - let credentials_txt = b"client1name:client1passwd -client2name:client2passwd"; - - let certs_dir = Path::new(file_path); - if !certs_dir.exists() { - fs::create_dir(certs_dir)?; - } - struct Testfile<'a> { - name: &'a str, - value: &'a [u8], - } - - let test_files = vec![ - Testfile { - name: "ca.pem", - value: ca_pem, - }, - Testfile { - name: "clientsidekey.pem", - value: client_side_key, - }, - Testfile { - name: "clientside.pem", - value: client_side_pem, - }, - Testfile { - name: "serversidekey.pem", - value: server_side_key, - }, - Testfile { - name: "serverside.pem", - value: server_side_pem, - }, - Testfile { - name: "credentials.txt", - value: credentials_txt, - }, - ]; - for test_file in test_files.iter() { - let file_path = certs_dir.join(test_file.name); - let mut file = 
fs::File::create(&file_path)?; - file.write_all(test_file.value)?; - } - - Ok(()) - } - - async fn get_basic_router_config_tls(port: u16) -> Config { - let mut config = config::default(); - config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]; - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .insert_json5( - "transport", - r#"{ - "link": { - "protocols": [ - "tls" - ], - "tls": { - "server_private_key": "tests/testfiles/serversidekey.pem", - "server_certificate": "tests/testfiles/serverside.pem", - "root_ca_certificate": "tests/testfiles/ca.pem", - "client_auth": true, - "server_name_verification": false - }, - }, - }"#, - ) - .unwrap(); - config - } - async fn get_basic_router_config_quic(port: u16) -> Config { - let mut config = config::default(); - config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]; - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .insert_json5( - "transport", - r#"{ - "link": { - "protocols": [ - "quic" - ], - "tls": { - "server_private_key": "tests/testfiles/serversidekey.pem", - "server_certificate": "tests/testfiles/serverside.pem", - "root_ca_certificate": "tests/testfiles/ca.pem", - "client_auth": true, - "server_name_verification": false - }, - }, - }"#, - ) - .unwrap(); - config - } - - async fn get_basic_router_config_usrpswd() -> Config { - let mut config = config::default(); - config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .insert_json5( - "transport", - r#"{ - "auth": { - usrpwd: { - user: "routername", - password: "routerpasswd", - dictionary_file: "tests/testfiles/credentials.txt", - }, - }, - }"#, - ) - .unwrap(); - config - } - async fn close_router_session(s: Session) { 
- println!("Closing router session"); - ztimeout!(s.close()).unwrap(); - } - - async fn get_client_sessions_tls(port: u16) -> (Session, Session) { - println!("Opening client sessions"); - let mut config = config::client([format!("tls/127.0.0.1:{}", port) - .parse::() - .unwrap()]); - config - .insert_json5( - "transport", - r#"{ - "link": { - "protocols": [ - "tls" - ], - "tls": { - "root_ca_certificate": "tests/testfiles/ca.pem", - "client_private_key": "tests/testfiles/clientsidekey.pem", - "client_certificate": "tests/testfiles/clientside.pem", - "client_auth": true, - "server_name_verification": false - } - } - }"#, - ) - .unwrap(); - let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let mut config = config::client([format!("tls/127.0.0.1:{}", port) - .parse::() - .unwrap()]); - config - .insert_json5( - "transport", - r#"{ - "link": { - "protocols": [ - "tls" - ], - "tls": { - "root_ca_certificate": "tests/testfiles/ca.pem", - "client_private_key": "tests/testfiles/clientsidekey.pem", - "client_certificate": "tests/testfiles/clientside.pem", - "client_auth": true, - "server_name_verification": false - } - } - }"#, - ) - .unwrap(); - let s02 = ztimeout!(zenoh::open(config)).unwrap(); - (s01, s02) - } - - async fn get_client_sessions_quic(port: u16) -> (Session, Session) { - println!("Opening client sessions"); - let mut config = config::client([format!("quic/127.0.0.1:{}", port) - .parse::() - .unwrap()]); - config - .insert_json5( - "transport", - r#"{ - "link": { - "protocols": [ - "quic" - ], - "tls": { - "root_ca_certificate": "tests/testfiles/ca.pem", - "client_private_key": "tests/testfiles/clientsidekey.pem", - "client_certificate": "tests/testfiles/clientside.pem", - "client_auth": true, - "server_name_verification": false - } - } - }"#, - ) - .unwrap(); - let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let mut config = config::client([format!("quic/127.0.0.1:{}", port) - .parse::() - .unwrap()]); - config - .insert_json5( - "transport", - r#"{ 
- "link": { - "protocols": [ - "quic" - ], - "tls": { - "root_ca_certificate": "tests/testfiles/ca.pem", - "client_private_key": "tests/testfiles/clientsidekey.pem", - "client_certificate": "tests/testfiles/clientside.pem", - "client_auth": true, - "server_name_verification": false - } - } - }"#, - ) - .unwrap(); - let s02 = ztimeout!(zenoh::open(config)).unwrap(); - (s01, s02) - } - - async fn get_client_sessions_usrpswd() -> (Session, Session) { - println!("Opening client sessions"); - let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); - config - .insert_json5( - "transport", - r#"{ - "auth": { - usrpwd: { - user: "client1name", - password: "client1passwd", - }, - } - }"#, - ) - .unwrap(); - let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); - config - .insert_json5( - "transport", - r#"{ - "auth": { - usrpwd: { - user: "client2name", - password: "client2passwd", - }, - } - }"#, - ) - .unwrap(); - let s02 = ztimeout!(zenoh::open(config)).unwrap(); - (s01, s02) - } - - async fn close_sessions(s01: Session, s02: Session) { - println!("Closing client sessions"); - ztimeout!(s01.close()).unwrap(); - ztimeout!(s02.close()).unwrap(); - } - - async fn test_pub_sub_deny_then_allow_tls(port: u16) { - println!("test_pub_sub_deny_then_allow_tls"); - - let mut config_router = get_basic_router_config_tls(port).await; - - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": false, - "default_permission": "deny", - "rules": [ - { - "permission": "allow", - "flows": ["ingress","egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (sub_session, pub_session) = get_client_sessions_tls(port).await; - { - let publisher = 
pub_session.declare_publisher(KEY_EXPR).await.unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .await - .unwrap(); - - tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).await.unwrap(); - tokio::time::sleep(SLEEP).await; - assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare()).unwrap(); - } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; - } - - async fn test_pub_sub_allow_then_deny_tls(port: u16) { - println!("test_pub_sub_allow_then_deny_tls"); - let mut config_router = get_basic_router_config_tls(port).await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "allow", - "rules": [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_tls(port).await; - { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = - ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE)).unwrap(); - tokio::time::sleep(SLEEP).await; - - assert_ne!(*zlock!(received_value), VALUE); - 
ztimeout!(subscriber.undeclare()).unwrap(); - } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; - } - - async fn test_get_qbl_deny_then_allow_tls(port: u16) { - println!("test_get_qbl_deny_then_allow_tls"); - - let mut config_router = get_basic_router_config_tls(port).await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "deny", - "rules": [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (get_session, qbl_session) = get_client_sessions_tls(port).await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() - }); - }); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.result() { - Ok(sample) => { - received_value = sample.payload().deserialize::().unwrap(); - break; - } - Err(e) => println!( - "Error : {}", - e.payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)) - ), - } - } - tokio::time::sleep(SLEEP).await; - assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare()).unwrap(); - } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; - } - - async fn test_get_qbl_allow_then_deny_tls(port: u16) { - println!("test_get_qbl_allow_then_deny_tls"); - - let mut config_router = get_basic_router_config_tls(port).await; - config_router - .insert_json5( - "access_control", - 
r#"{ - "enabled": true, - "default_permission": "allow", - "rules": [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "get", - "declare_queryable" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (get_session, qbl_session) = get_client_sessions_tls(port).await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() - }); - }); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.result() { - Ok(sample) => { - received_value = sample.payload().deserialize::().unwrap(); - break; - } - Err(e) => println!( - "Error : {}", - e.payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)) - ), - } - } - tokio::time::sleep(SLEEP).await; - assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare()).unwrap(); - } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; - } - - async fn test_pub_sub_deny_then_allow_quic(port: u16) { - println!("test_pub_sub_deny_then_allow_quic"); - - let mut config_router = get_basic_router_config_quic(port).await; - - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": false, - "default_permission": "deny", - "rules": [ - { - "permission": "allow", - "flows": ["ingress","egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = 
ztimeout!(zenoh::open(config_router)).unwrap(); - - let (sub_session, pub_session) = get_client_sessions_quic(port).await; - { - let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .await - .unwrap(); - - tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).await.unwrap(); - tokio::time::sleep(SLEEP).await; - assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare()).unwrap(); - } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; - } - - #[allow(unused)] - async fn test_pub_sub_allow_then_deny_quic(port: u16) { - println!("test_pub_sub_allow_then_deny_quic"); - - let mut config_router = get_basic_router_config_quic(port).await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "allow", - "rules": [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_quic(port).await; - { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = - ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - })) - .unwrap(); - - 
tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE)).unwrap(); - tokio::time::sleep(SLEEP).await; - - assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare()).unwrap(); - } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; - } - - #[allow(unused)] - async fn test_get_qbl_deny_then_allow_quic(port: u16) { - println!("test_get_qbl_deny_then_allow_quic"); - - let mut config_router = get_basic_router_config_quic(port).await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "deny", - "rules": [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable"], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (get_session, qbl_session) = get_client_sessions_quic(port).await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() - }); - }); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.result() { - Ok(sample) => { - received_value = sample.payload().deserialize::().unwrap(); - break; - } - Err(e) => println!( - "Error : {}", - e.payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)) - ), - } - } - tokio::time::sleep(SLEEP).await; - assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare()).unwrap(); - } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; - } - - #[allow(unused)] - async fn 
test_get_qbl_allow_then_deny_quic(port: u16) { - println!("test_get_qbl_allow_then_deny_quic"); - - let mut config_router = get_basic_router_config_quic(port).await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "get", - "declare_queryable" - ], - "key_exprs": [ - "test/demo" - ], - "cert_common_names": [ - "client_side" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (get_session, qbl_session) = get_client_sessions_quic(port).await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() - }); - }); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.result() { - Ok(sample) => { - received_value = sample.payload().deserialize::().unwrap(); - break; - } - Err(e) => println!( - "Error : {}", - e.payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)) - ), - } - } - tokio::time::sleep(SLEEP).await; - assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare()).unwrap(); - } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; - } - - async fn test_pub_sub_deny_then_allow_usrpswd() { - println!("test_pub_sub_deny_then_allow_usrpswd"); - - let mut config_router = get_basic_router_config_usrpswd().await; - - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": false, - "default_permission": "deny", - "rules": [ - { - "permission": "allow", - "flows": ["ingress","egress"], - "actions": [ - "put", - 
"declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "usernames": [ - "client1name", - "client2name" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (sub_session, pub_session) = get_client_sessions_usrpswd().await; - { - let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .await - .unwrap(); - - tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).await.unwrap(); - tokio::time::sleep(SLEEP).await; - assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare()).unwrap(); - } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; - } - - async fn test_pub_sub_allow_then_deny_usrpswd() { - println!("test_pub_sub_allow_then_deny_usrpswd"); - - let mut config_router = get_basic_router_config_usrpswd().await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "allow", - "rules": [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "usernames": [ - "client1name", - "client2name" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_usrpswd().await; - { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = - ztimeout!(sub_session - 
.declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE)).unwrap(); - tokio::time::sleep(SLEEP).await; - - assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare()).unwrap(); - } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; - } - - async fn test_get_qbl_deny_then_allow_usrpswd() { - println!("test_get_qbl_deny_then_allow_usrpswd"); - - let mut config_router = get_basic_router_config_usrpswd().await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "deny", - "rules": [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable" - ], - "key_exprs": [ - "test/demo" - ], - "usernames": [ - "client1name", - "client2name" - ] - }, - ] - }"#, - ) - .unwrap(); - - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (get_session, qbl_session) = get_client_sessions_usrpswd().await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() - }); - }); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.result() { - Ok(sample) => { - received_value = sample.payload().deserialize::().unwrap(); - break; - } - Err(e) => println!( - "Error : {}", - e.payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)) - ), - } - } - tokio::time::sleep(SLEEP).await; - assert_eq!(received_value, VALUE); - 
ztimeout!(qbl.undeclare()).unwrap(); - } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; - } - - async fn test_get_qbl_allow_then_deny_usrpswd() { - println!("test_get_qbl_allow_then_deny_usrpswd"); - - let mut config_router = get_basic_router_config_usrpswd().await; - config_router - .insert_json5( - "access_control", - r#"{ - "enabled": true, - "default_permission": "allow", - "rules": [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "get", - "declare_queryable" - ], - "key_exprs": [ - "test/demo" - ], - "usernames": [ - "client1name", - "client2name" - ] - }, - ] - }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router)).unwrap(); - - let (get_session, qbl_session) = get_client_sessions_usrpswd().await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() - }); - }); - })) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.result() { - Ok(sample) => { - received_value = sample.payload().deserialize::().unwrap(); - break; - } - Err(e) => println!( - "Error : {}", - e.payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)) - ), - } - } - tokio::time::sleep(SLEEP).await; - assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare()).unwrap(); - } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; - } -} From 9dd6c7852a7279962cefaed00ac28ff037227e69 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 31 May 2024 16:01:55 +0200 Subject: [PATCH 350/598] internal feature added, buffers under internal --- 
.../zenoh-plugin-storage-manager/Cargo.toml | 2 +- .../src/replica/storage.rs | 2 +- zenoh-ext/Cargo.toml | 2 +- zenoh/Cargo.toml | 1 + zenoh/src/api/bytes.rs | 4 +--- zenoh/src/lib.rs | 21 ++++++++++--------- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index e328e16948..6f72fc91e6 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -45,7 +45,7 @@ tracing = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } urlencoding = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } +zenoh = { workspace = true, features = ["unstable", "internal"] } zenoh-plugin-trait = { workspace = true } zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index bd7d56f7fc..4555498ddd 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -22,7 +22,6 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use zenoh::{ - buffers::{SplitBuffer, ZBuf}, core::Result as ZResult, internal::{bail, zenoh_home, Timed, TimedEvent, Timer}, key_expr::{ @@ -38,6 +37,7 @@ use zenoh::{ time::{new_reception_timestamp, Timestamp, NTP64}, value::Value, }; +use zenoh::internal::buffers::{SplitBuffer, ZBuf}; use zenoh_backend_traits::{ config::{GarbageCollectionConfig, StorageConfig}, Capability, History, Persistence, StorageInsertionResult, StoredData, diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 402d37e5f4..47204a2d66 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -50,7 +50,7 @@ tracing = { workspace = true } serde = { workspace = true, 
features = ["default"] } serde_cbor = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true, features = ["unstable"], default-features = false } +zenoh = { workspace = true, features = ["unstable", "internal"], default-features = false } zenoh-macros = { workspace = true } [package.metadata.docs.rs] diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 8203a1673d..6e9a0caf1c 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -51,6 +51,7 @@ transport_unixsock-stream = ["zenoh-transport/transport_unixsock-stream"] transport_ws = ["zenoh-transport/transport_ws"] transport_vsock = ["zenoh-transport/transport_vsock"] unstable = [] +internal = [] default = [ "auth_pubkey", "auth_usrpwd", diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 4b6a8fc33b..a28625027e 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -23,7 +23,7 @@ use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::HasWriter, - ZBufReader, ZBufWriter, ZSlice, + ZBufReader, ZBufWriter, ZSlice, ZBuf, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; @@ -37,8 +37,6 @@ use zenoh_shm::{ SharedMemoryBuf, }; -use crate::buffers::ZBuf; - /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { type Output; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c033c7feee..ad9b632263 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -134,16 +134,6 @@ pub mod core { pub use zenoh_result::ZResult as Result; } -/// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate -/// reading and writing data. -pub mod buffers { - pub use zenoh_buffers::{ - buffer::SplitBuffer, - reader::{HasReader, Reader}, - ZBuf, ZBufReader, ZSlice, ZSliceBuffer, - }; -} - /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. 
/// /// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). @@ -372,6 +362,7 @@ pub mod plugins { } #[doc(hidden)] +#[cfg(feature = "internal")] pub mod internal { pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout, ResolveFuture}; pub use zenoh_result::bail; @@ -380,6 +371,16 @@ pub mod internal { pub use zenoh_util::{zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; pub use crate::api::encoding::EncodingInternals; + + /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate + /// reading and writing data. + pub mod buffers { + pub use zenoh_buffers::{ + buffer::SplitBuffer, + reader::{HasReader, Reader}, + ZBuf, ZBufReader, ZSlice, ZSliceBuffer, + }; + } } #[cfg(all(feature = "unstable", feature = "shared-memory"))] From 504bcdf27670147298be696b395236a2a1a27366 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 13:14:36 +0200 Subject: [PATCH 351/598] zenoh_macro::internal added --- commons/zenoh-macros/src/lib.rs | 53 ++++++++++++++++++++++++--------- 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index b1d59dcf59..3b3b14cd3d 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -59,15 +59,15 @@ pub fn rustc_version_release(_tokens: TokenStream) -> TokenStream { (quote! {(#release, #commit)}).into() } -/// An enumeration of items supported by the [`unstable`] attribute. -enum UnstableItem { +/// An enumeration of items which can be annotated with `#[zenoh_macros::unstable]`, #[zenoh_macros::unstable]`, `#[zenoh_macros::internal]` +enum AnnotableItem { /// Wrapper around [`syn::Item`]. Item(Item), /// Wrapper around [`syn::TraitItem`]. TraitItem(TraitItem), } -macro_rules! parse_unstable_item { +macro_rules! 
parse_annotable_item { ($tokens:ident) => {{ let item: Item = parse_macro_input!($tokens as Item); @@ -81,19 +81,19 @@ macro_rules! parse_unstable_item { "the `unstable` proc-macro attribute only supports items and trait items", )) } else { - Ok(UnstableItem::TraitItem(trait_item)) + Ok(AnnotableItem::TraitItem(trait_item)) } } else { - Ok(UnstableItem::Item(item)) + Ok(AnnotableItem::Item(item)) } }}; } -impl UnstableItem { +impl AnnotableItem { /// Mutably borrows the attribute list of this item. fn attributes_mut(&mut self) -> Result<&mut Vec, Error> { match self { - UnstableItem::Item(item) => match item { + AnnotableItem::Item(item) => match item { Item::Const(item) => Ok(&mut item.attrs), Item::Enum(item) => Ok(&mut item.attrs), Item::ExternCrate(item) => Ok(&mut item.attrs), @@ -111,17 +111,17 @@ impl UnstableItem { Item::Use(item) => Ok(&mut item.attrs), other => Err(Error::new_spanned( other, - "item is not supported by the `unstable` proc-macro attribute", + "item is not supported by the `unstable` or `internal` proc-macro attribute", )), }, - UnstableItem::TraitItem(trait_item) => match trait_item { + AnnotableItem::TraitItem(trait_item) => match trait_item { TraitItem::Const(trait_item) => Ok(&mut trait_item.attrs), TraitItem::Fn(trait_item) => Ok(&mut trait_item.attrs), TraitItem::Type(trait_item) => Ok(&mut trait_item.attrs), TraitItem::Macro(trait_item) => Ok(&mut trait_item.attrs), other => Err(Error::new_spanned( other, - "item is not supported by the `unstable` proc-macro attribute", + "item is not supported by the `unstable` or `internal` proc-macro attribute", )), }, } @@ -130,15 +130,18 @@ impl UnstableItem { /// Converts this item to a `proc_macro2::TokenStream`. 
fn to_token_stream(&self) -> proc_macro2::TokenStream { match self { - UnstableItem::Item(item) => item.to_token_stream(), - UnstableItem::TraitItem(trait_item) => trait_item.to_token_stream(), + AnnotableItem::Item(item) => item.to_token_stream(), + AnnotableItem::TraitItem(trait_item) => trait_item.to_token_stream(), } } } #[proc_macro_attribute] +/// Adds only a piece of documentation about the item being unstable but no unstable attribute itself. +/// This is useful when the whole crate is supposed to be used in unstable mode only; it makes sense +/// to mention it in documentation for the crate items, but not to add `#[cfg(feature = "unstable")]` to every item. pub fn unstable_doc(_attr: TokenStream, tokens: TokenStream) -> TokenStream { - let mut item = match parse_unstable_item!(tokens) { + let mut item = match parse_annotable_item!(tokens) { Ok(item) => item, Err(err) => return err.into_compile_error().into(), }; @@ -159,9 +162,10 @@ pub fn unstable_doc(_attr: TokenStream, tokens: TokenStream) -> TokenStream { } #[proc_macro_attribute] +/// Adds a `#[cfg(feature = "unstable")]` attribute to the item and appends a piece of documentation about the item being unstable. pub fn unstable(attr: TokenStream, tokens: TokenStream) -> TokenStream { let tokens = unstable_doc(attr, tokens); - let mut item = match parse_unstable_item!(tokens) { + let mut item = match parse_annotable_item!(tokens) { Ok(item) => item, Err(err) => return err.into_compile_error().into(), }; @@ -177,6 +181,27 @@ pub fn unstable(attr: TokenStream, tokens: TokenStream) -> TokenStream { TokenStream::from(item.to_token_stream()) } +#[proc_macro_attribute] +/// Adds `#[cfg(feature = "internal")]` and `#[doc(hidden)]` attributes to the item. 
+pub fn internal(_attr: TokenStream, tokens: TokenStream) -> TokenStream { + let mut item = match parse_annotable_item!(tokens) { + Ok(item) => item, + Err(err) => return err.into_compile_error().into(), + }; + + let attrs = match item.attributes_mut() { + Ok(attrs) => attrs, + Err(err) => return err.into_compile_error().into(), + }; + + let feature_gate: Attribute = parse_quote!(#[cfg(feature = "internal")]); + let hide_doc: Attribute = parse_quote!(#[doc(hidden)]); + attrs.push(feature_gate); + attrs.push(hide_doc); + + TokenStream::from(item.to_token_stream()) +} + /// Returns `true` if the attribute is a `#[doc = "..."]` attribute. fn is_doc_attribute(attr: &Attribute) -> bool { attr.path() From 6e7bac475f75c9ab7ede9a52658e347d3734340b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 14:04:28 +0200 Subject: [PATCH 352/598] init is internal, priority in core, keyexpr unstables --- zenoh-ext/src/group.rs | 3 ++- zenoh/src/api/session.rs | 7 ++++++- zenoh/src/lib.rs | 21 ++++++++++++--------- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 3eb807a638..f415ffe5be 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -28,10 +28,11 @@ use serde::{Deserialize, Serialize}; use tokio::sync::Mutex; use zenoh::{ bytes::ZBytesReader, + core::Priority, internal::{bail, Condition, TaskController}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::*, - publisher::{Priority, Publisher}, + publisher::Publisher, Session, }; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a81835cadc..23f01e8723 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2794,8 +2794,8 @@ where /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. 
-#[doc(hidden)] #[zenoh_macros::unstable] +#[zenoh_macros::internal] pub fn init(runtime: Runtime) -> InitBuilder { InitBuilder { runtime, @@ -2808,6 +2808,7 @@ pub fn init(runtime: Runtime) -> InitBuilder { #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[doc(hidden)] #[zenoh_macros::unstable] +#[zenoh_macros::internal] pub struct InitBuilder { runtime: Runtime, aggregated_subscribers: Vec, @@ -2815,6 +2816,7 @@ pub struct InitBuilder { } #[zenoh_macros::unstable] +#[zenoh_macros::internal] impl InitBuilder { #[inline] pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { @@ -2830,11 +2832,13 @@ impl InitBuilder { } #[zenoh_macros::unstable] +#[zenoh_macros::internal] impl Resolvable for InitBuilder { type To = ZResult; } #[zenoh_macros::unstable] +#[zenoh_macros::internal] impl Wait for InitBuilder { fn wait(self) -> ::To { Ok(Session::init( @@ -2847,6 +2851,7 @@ impl Wait for InitBuilder { } #[zenoh_macros::unstable] +#[zenoh_macros::internal] impl IntoFuture for InitBuilder { type Output = ::To; type IntoFuture = Ready<::To>; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ad9b632263..40181d96c5 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -115,7 +115,6 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( pub use crate::{ config::Config, core::{Error, Result}, - key_expr::{kedefine, keformat, kewrite}, scouting::scout, session::{open, Session}, }; @@ -132,6 +131,8 @@ pub mod core { pub use zenoh_result::Error; /// A zenoh result. pub use zenoh_result::ZResult as Result; + /// Zenoh message priority + pub use crate::api::publisher::Priority; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. 
@@ -165,6 +166,7 @@ pub mod core { /// [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, /// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. pub mod key_expr { + #[zenoh_macros::unstable] pub mod keyexpr_tree { pub use zenoh_keyexpr::keyexpr_tree::{ impls::KeyedSetProvider, @@ -172,12 +174,16 @@ pub mod key_expr { IKeyExprTree, IKeyExprTreeMut, KeBoxTree, }; } - pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr, SetIntersectionLevel}; - pub use zenoh_macros::{kedefine, keformat, kewrite}; + pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; + + #[zenoh_macros::unstable] + pub use zenoh_keyexpr::SetIntersectionLevel; pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration}; // keyexpr format macro support + #[zenoh_macros::unstable] pub mod format { + pub use zenoh_macros::{kedefine, keformat, kewrite}; pub use zenoh_keyexpr::format::*; pub mod macro_support { pub use zenoh_keyexpr::format::macro_support::*; @@ -188,11 +194,8 @@ pub mod key_expr { /// Zenoh [`Session`](crate::session::Session) and associated types pub mod session { #[zenoh_macros::unstable] - #[doc(hidden)] - pub use crate::api::session::init; - #[zenoh_macros::unstable] - #[doc(hidden)] - pub use crate::api::session::InitBuilder; + #[zenoh_macros::internal] + pub use crate::api::session::{init, InitBuilder}; pub use crate::api::{ builders::publisher::{SessionDeleteBuilder, SessionPutBuilder}, session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, @@ -276,7 +279,7 @@ pub mod publisher { PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, PublisherDeleteBuilder, PublisherPutBuilder, }, - publisher::{Priority, Publisher, PublisherUndeclaration}, + publisher::{Publisher, PublisherUndeclaration}, }; } From 7a4c7d99a43b78c951de90338fcb8b0c3b3d1457 
Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 3 Jun 2024 14:07:23 +0200 Subject: [PATCH 353/598] feat: add missing serialization implementation for `Zbytes` to `ZBytes` (#1076) --- zenoh/src/api/bytes.rs | 46 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 4b6a8fc33b..0373c2cc4d 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -311,6 +311,52 @@ pub struct ZSerde; #[derive(Debug, Clone, Copy)] pub struct ZDeserializeError; +// ZBytes +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: ZBytes) -> Self::Output { + t + } +} + +impl From<&ZBytes> for ZBytes { + fn from(t: &ZBytes) -> Self { + ZSerde.serialize(t) + } +} + +impl From<&mut ZBytes> for ZBytes { + fn from(t: &mut ZBytes) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&ZBytes> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &ZBytes) -> Self::Output { + t.clone() + } +} + +impl Serialize<&mut ZBytes> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZBytes) -> Self::Output { + t.clone() + } +} + +impl<'a> Deserialize<'a, ZBytes> for ZSerde { + type Input = &'a ZBytes; + type Error = Infallible; + + fn deserialize(self, v: Self::Input) -> Result { + Ok(v.clone()) + } +} + // ZBuf impl Serialize for ZSerde { type Output = ZBytes; From 7b8e729c06c3badd8ebd97f9c99b8d4fbf8c7049 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 14:24:34 +0200 Subject: [PATCH 354/598] compile fixes --- examples/examples/z_formats.rs | 9 ++++++--- examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_pub_thr.rs | 6 +----- zenoh/tests/formatters.rs | 2 +- zenoh/tests/qos.rs | 6 +----- 6 files changed, 11 insertions(+), 16 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 7c3d3988d3..a423e9b756 100644 --- a/examples/examples/z_formats.rs 
+++ b/examples/examples/z_formats.rs @@ -12,8 +12,11 @@ // ZettaScale Zenoh Team, // -use zenoh::key_expr::keyexpr; -zenoh::kedefine!( +use zenoh::key_expr::{ + format::{kedefine, keformat}, + keyexpr, +}; +kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" ); @@ -22,7 +25,7 @@ fn main() { // Formatting let mut formatter = file_format::formatter(); let file = "hi/there"; - let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); + let ke = keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index a88a9f59a6..20d2c0f540 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use clap::Parser; use zenoh::{ - buffers::ZSlice, + internal::buffers::ZSlice, key_expr::keyexpr, prelude::*, publisher::CongestionControl, diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 2d52668ac9..35e6b81441 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -13,7 +13,7 @@ // use clap::Parser; use zenoh::{ - buffers::ZSlice, + internal::buffers::ZSlice, prelude::*, publisher::CongestionControl, shm::{PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID}, diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 8ea7226c8a..817d0ee0d1 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -15,11 +15,7 @@ use std::convert::TryInto; use clap::Parser; -use zenoh::{ - bytes::ZBytes, - prelude::*, - publisher::{CongestionControl, Priority}, -}; +use zenoh::{bytes::ZBytes, core::Priority, prelude::*, publisher::CongestionControl}; use zenoh_examples::CommonArgs; 
fn main() { diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index e1c366ac52..faa4839abd 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::{kedefine, keformat}; +use zenoh::key_expr::format::{kedefine, keformat}; #[test] fn kedefine_reuse() { diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index ab2dd1d000..18bc782852 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,11 +13,7 @@ // use std::time::Duration; -use zenoh::{ - internal::ztimeout, - prelude::*, - publisher::{CongestionControl, Priority}, -}; +use zenoh::{core::Priority, internal::ztimeout, prelude::*, publisher::CongestionControl}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From fb30b1959d4718c37c565a9854d548913761cfb3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 14:36:39 +0200 Subject: [PATCH 355/598] SessionGetBuilder, WhatAMI in config --- examples/src/lib.rs | 6 +++--- zenoh-ext/examples/src/lib.rs | 6 +++--- zenoh/src/api/query.rs | 32 ++++++++++++++++---------------- zenoh/src/api/session.rs | 6 +++--- zenoh/src/lib.rs | 18 +++++++----------- 5 files changed, 32 insertions(+), 36 deletions(-) diff --git a/examples/src/lib.rs b/examples/src/lib.rs index 0be3809cf2..1ab27dfc8f 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -49,9 +49,9 @@ impl From<&CommonArgs> for Config { None => Config::default(), }; match value.mode { - Some(Wai::Peer) => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)), - Some(Wai::Client) => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)), - Some(Wai::Router) => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)), + Some(Wai::Peer) => config.set_mode(Some(zenoh::config::WhatAmI::Peer)), + Some(Wai::Client) => config.set_mode(Some(zenoh::config::WhatAmI::Client)), + Some(Wai::Router) => 
config.set_mode(Some(zenoh::config::WhatAmI::Router)), None => Ok(None), } .unwrap(); diff --git a/zenoh-ext/examples/src/lib.rs b/zenoh-ext/examples/src/lib.rs index 4c203d5cff..b3e675b046 100644 --- a/zenoh-ext/examples/src/lib.rs +++ b/zenoh-ext/examples/src/lib.rs @@ -43,9 +43,9 @@ impl From<&CommonArgs> for Config { None => Config::default(), }; match value.mode { - Some(Wai::Peer) => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)), - Some(Wai::Client) => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)), - Some(Wai::Router) => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)), + Some(Wai::Peer) => config.set_mode(Some(zenoh::config::WhatAmI::Peer)), + Some(Wai::Client) => config.set_mode(Some(zenoh::config::WhatAmI::Client)), + Some(Wai::Router) => config.set_mode(Some(zenoh::config::WhatAmI::Router)), None => Ok(None), } .unwrap(); diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index ffe3d67e14..d0ac0e0044 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -146,7 +146,7 @@ pub(crate) struct QueryState { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct GetBuilder<'a, 'b, Handler> { +pub struct SessionGetBuilder<'a, 'b, Handler> { pub(crate) session: &'a Session, pub(crate) selector: ZResult>, pub(crate) scope: ZResult>>, @@ -164,7 +164,7 @@ pub struct GetBuilder<'a, 'b, Handler> { } #[zenoh_macros::unstable] -impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { +impl SampleBuilderTrait for SessionGetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -183,7 +183,7 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { } } -impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { +impl QoSBuilderTrait for SessionGetBuilder<'_, '_, DefaultHandler> { fn congestion_control(self, congestion_control: 
CongestionControl) -> Self { let qos = self.qos.congestion_control(congestion_control); Self { qos, ..self } @@ -200,7 +200,7 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { } } -impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { +impl ValueBuilderTrait for SessionGetBuilder<'_, '_, Handler> { fn encoding>(self, encoding: T) -> Self { let mut value = self.value.unwrap_or_default(); value.encoding = encoding.into(); @@ -227,7 +227,7 @@ impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { } } -impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { +impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { /// Receive the replies for this query with a callback. /// /// # Examples @@ -245,11 +245,11 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// # } /// ``` #[inline] - pub fn callback(self, callback: Callback) -> GetBuilder<'a, 'b, Callback> + pub fn callback(self, callback: Callback) -> SessionGetBuilder<'a, 'b, Callback> where Callback: Fn(Reply) + Send + Sync + 'static, { - let GetBuilder { + let SessionGetBuilder { session, selector, scope, @@ -265,7 +265,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { source_info, handler: _, } = self; - GetBuilder { + SessionGetBuilder { session, selector, scope, @@ -307,7 +307,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { pub fn callback_mut( self, callback: CallbackMut, - ) -> GetBuilder<'a, 'b, impl Fn(Reply) + Send + Sync + 'static> + ) -> SessionGetBuilder<'a, 'b, impl Fn(Reply) + Send + Sync + 'static> where CallbackMut: FnMut(Reply) + Send + Sync + 'static, { @@ -334,11 +334,11 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// # } /// ``` #[inline] - pub fn with(self, handler: Handler) -> GetBuilder<'a, 'b, Handler> + pub fn with(self, handler: Handler) -> SessionGetBuilder<'a, 'b, Handler> where Handler: IntoHandler<'static, Reply>, { - let GetBuilder { + let SessionGetBuilder { session, selector, scope, @@ -354,7 +354,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, 
DefaultHandler> { source_info, handler: _, } = self; - GetBuilder { + SessionGetBuilder { session, selector, scope, @@ -372,7 +372,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { } } } -impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { +impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { /// Change the target of the query. #[inline] pub fn target(self, target: QueryTarget) -> Self { @@ -436,7 +436,7 @@ pub enum ReplyKeyExpr { MatchingQuery, } -impl Resolvable for GetBuilder<'_, '_, Handler> +impl Resolvable for SessionGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, @@ -444,7 +444,7 @@ where type To = ZResult; } -impl Wait for GetBuilder<'_, '_, Handler> +impl Wait for SessionGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, @@ -472,7 +472,7 @@ where } } -impl IntoFuture for GetBuilder<'_, '_, Handler> +impl IntoFuture for SessionGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 23f01e8723..a483a7ac81 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -72,7 +72,7 @@ use super::{ info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, publisher::{Priority, PublisherState}, - query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + query::{ConsolidationMode, SessionGetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, @@ -796,7 +796,7 @@ impl Session { pub fn get<'a, 'b: 'a, TryIntoSelector>( &'a self, selector: TryIntoSelector, - ) -> GetBuilder<'a, 'b, DefaultHandler> + ) -> SessionGetBuilder<'a, 'b, DefaultHandler> where TryIntoSelector: TryInto>, >>::Error: Into, @@ -807,7 +807,7 
@@ impl Session { Duration::from_millis(unwrap_or_default!(conf.queries_default_timeout())) }; let qos: QoS = request::ext::QoSType::REQUEST.into(); - GetBuilder { + SessionGetBuilder { session: self, selector, scope: Ok(None), diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 40181d96c5..ece41bc729 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -123,6 +123,8 @@ pub mod prelude; /// Zenoh core types pub mod core { + /// Zenoh message priority + pub use crate::api::publisher::Priority; #[allow(deprecated)] pub use zenoh_core::{AsyncResolve, SyncResolve}; pub use zenoh_core::{Resolvable, Resolve, Wait}; @@ -131,8 +133,6 @@ pub mod core { pub use zenoh_result::Error; /// A zenoh result. pub use zenoh_result::ZResult as Result; - /// Zenoh message priority - pub use crate::api::publisher::Priority; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. @@ -175,7 +175,7 @@ pub mod key_expr { }; } pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; - + #[zenoh_macros::unstable] pub use zenoh_keyexpr::SetIntersectionLevel; @@ -183,8 +183,8 @@ pub mod key_expr { // keyexpr format macro support #[zenoh_macros::unstable] pub mod format { - pub use zenoh_macros::{kedefine, keformat, kewrite}; pub use zenoh_keyexpr::format::*; + pub use zenoh_macros::{kedefine, keformat, kewrite}; pub mod macro_support { pub use zenoh_keyexpr::format::macro_support::*; } @@ -197,6 +197,7 @@ pub mod session { #[zenoh_macros::internal] pub use crate::api::session::{init, InitBuilder}; pub use crate::api::{ + query::SessionGetBuilder, builders::publisher::{SessionDeleteBuilder, SessionPutBuilder}, session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, }; @@ -289,9 +290,7 @@ pub mod query { pub use crate::api::query::ReplyKeyExpr; #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; - pub use crate::api::query::{ - ConsolidationMode, GetBuilder, 
QueryConsolidation, QueryTarget, Reply, - }; + pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply}; } /// Queryable primitives @@ -314,12 +313,9 @@ pub mod handlers { /// Scouting primitives pub mod scouting { - /// Constants and helpers for zenoh `whatami` flags. - pub use zenoh_protocol::core::WhatAmI; + pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; /// A zenoh Hello message. pub use zenoh_protocol::scouting::Hello; - - pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; } /// Liveliness primitives From 510bd102599202562f40346b82b59070be932d51 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 14:37:46 +0200 Subject: [PATCH 356/598] compile fix --- zenohd/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 229352e5db..465f0b539e 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -20,7 +20,7 @@ use url::Url; use zenoh::{ config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap}, core::Result, - scouting::WhatAmI, + config::WhatAmI, }; #[cfg(feature = "loki")] From e1cbb91f28522f0eea5ea11fbdb7ccd5ba44b022 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 3 Jun 2024 15:02:28 +0200 Subject: [PATCH 357/598] feat: add `RingChannel.recv_deadline` (#1075) * feat: add `RingChannel.recv_deadline` It's needed for Python binding. In fact, `recv` cannot be called in Python because it prevents Python signals, e.g. `KeyboardInterrupt` to be handled. The solution is to call `recv_deadline` within in a loop with a short deadline, e.g. 100ms. * fix: fix implementation --- zenoh/src/api/handlers/ring.rs | 59 +++++++++++++++++++++++++++++++--- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 77ad867d36..18ca6f495c 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -13,7 +13,10 @@ // //! 
Callback handler trait. -use std::sync::{Arc, Weak}; +use std::{ + sync::{Arc, Weak}, + time::{Duration, Instant}, +}; use zenoh_collections::RingBuffer; use zenoh_result::ZResult; @@ -49,7 +52,9 @@ pub struct RingChannelHandler { } impl RingChannelHandler { - /// Receive from the ring channel. If the ring channel is empty, this call will block until an element is available in the channel. + /// Receive from the ring channel. + /// + /// If the ring channel is empty, this call will block until an element is available in the channel. pub fn recv(&self) -> ZResult { let Some(channel) = self.ring.upgrade() else { bail!("The ringbuffer has been deleted."); @@ -62,7 +67,51 @@ impl RingChannelHandler { } } - /// Receive from the ring channel. If the ring channel is empty, this call will block until an element is available in the channel. + /// Receive from the ring channel with a deadline. + /// + /// If the ring channel is empty, this call will block until an element is available in the channel, + /// or return `None` if the deadline has passed. + pub fn recv_deadline(&self, deadline: Instant) -> ZResult> { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + + loop { + if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() { + return Ok(Some(t)); + } + match channel.not_empty.recv_deadline(deadline) { + Ok(()) => {} + Err(flume::RecvTimeoutError::Timeout) => return Ok(None), + Err(err) => bail!("{}", err), + } + } + } + + /// Receive from the ring channel with a timeout. + /// + /// If the ring channel is empty, this call will block until an element is available in the channel, + /// or return `None` if the deadline has expired. 
+ pub fn recv_timeout(&self, timeout: Duration) -> ZResult> { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + + loop { + if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() { + return Ok(Some(t)); + } + match channel.not_empty.recv_timeout(timeout) { + Ok(()) => {} + Err(flume::RecvTimeoutError::Timeout) => return Ok(None), + Err(err) => bail!("{}", err), + } + } + } + + /// Receive from the ring channel. + /// + /// If the ring channel is empty, this call will block until an element is available in the channel. pub async fn recv_async(&self) -> ZResult { let Some(channel) = self.ring.upgrade() else { bail!("The ringbuffer has been deleted."); @@ -79,7 +128,9 @@ impl RingChannelHandler { } } - /// Try to receive from the ring channel. If the ring channel is empty, this call will return immediately without blocking. + /// Try to receive from the ring channel. + /// + /// If the ring channel is empty, this call will return immediately without blocking. 
pub fn try_recv(&self) -> ZResult> { let Some(channel) = self.ring.upgrade() else { bail!("The ringbuffer has been deleted."); From 133bb94ab789898d1dd4111af3ef3a2b09c21770 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 16:33:24 +0200 Subject: [PATCH 358/598] query and reply in query module --- zenoh-ext/src/publication_cache.rs | 3 ++- zenoh/src/api/queryable.rs | 12 ++++++++++-- zenoh/src/lib.rs | 18 +++++++++++++----- 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 1796668f1c..510e806a3f 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -23,7 +23,8 @@ use zenoh::{ internal::{bail, ResolveFuture, TerminatableTask}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::Wait, - queryable::{Query, Queryable}, + query::Query, + queryable::Queryable, runtime::ZRuntime, sample::{Locality, Sample}, session::{SessionDeclarations, SessionRef}, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 127d8cb281..8ad7d09f7b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -149,8 +149,8 @@ impl Query { /// replying on a disjoint key expression will result in an error when resolving the reply. /// This api is for internal use only. 
#[inline(always)] - #[cfg(feature = "unstable")] - #[doc(hidden)] + #[zenoh_macros::unstable] + #[zenoh_macros::internal] pub fn reply_sample(&self, sample: Sample) -> ReplySample<'_> { ReplySample { query: self, @@ -272,21 +272,29 @@ impl fmt::Display for Query { } } +#[zenoh_macros::unstable] +#[zenoh_macros::internal] pub struct ReplySample<'a> { query: &'a Query, sample: Sample, } +#[zenoh_macros::unstable] +#[zenoh_macros::internal] impl Resolvable for ReplySample<'_> { type To = ZResult<()>; } +#[zenoh_macros::unstable] +#[zenoh_macros::internal] impl Wait for ReplySample<'_> { fn wait(self) -> ::To { self.query._reply_sample(self.sample) } } +#[zenoh_macros::unstable] +#[zenoh_macros::internal] impl IntoFuture for ReplySample<'_> { type Output = ::To; type IntoFuture = Ready<::To>; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ece41bc729..6c0696586c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -284,22 +284,30 @@ pub mod publisher { }; } -/// Query primitives +/// Get operation primitives +pub mod querier { + // Later the `Querier` with `get`` operation will be added here, in addition to `Session::get`, + // similarly to the `Publisher` with `put` operation and `Session::put` +} + +/// Query and Reply primitives pub mod query { #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply}; + pub use crate::api::queryable::Query; + pub use crate::api::queryable::{ReplyBuilder, ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder}; + #[zenoh_macros::unstable] + #[zenoh_macros::internal] + pub use crate::api::queryable::ReplySample; } /// Queryable primitives pub mod queryable { - #[zenoh_macros::unstable] - pub use crate::api::queryable::ReplySample; pub use crate::api::queryable::{ - Query, Queryable, QueryableBuilder, QueryableUndeclaration, ReplyBuilder, - 
ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder, + Queryable, QueryableBuilder, QueryableUndeclaration, }; } From 128373514d094ae5bb3fc81acd1e9eb7cdde25ae Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 3 Jun 2024 17:02:30 +0200 Subject: [PATCH 359/598] feat: add `zenoh_util::init_log_with_callback` It will notably use in bindings, to redirect tracing events into native logs. --- commons/zenoh-util/src/std_only/log.rs | 86 +++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 3 deletions(-) diff --git a/commons/zenoh-util/src/std_only/log.rs b/commons/zenoh-util/src/std_only/log.rs index 07d66d9233..d9498b9cb3 100644 --- a/commons/zenoh-util/src/std_only/log.rs +++ b/commons/zenoh-util/src/std_only/log.rs @@ -11,9 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use tracing_subscriber::EnvFilter; +use std::{fmt, thread, thread::ThreadId}; -/// This is an utility function to enable the tracing formatting subscriber from +use tracing::{field::Field, span, Event, Subscriber}; +use tracing_subscriber::{ + layer::{Context, SubscriberExt}, + registry::LookupSpan, + EnvFilter, +}; + +/// This is a utility function to enable the tracing formatting subscriber from /// the `RUST_LOG` environment variable. If `RUST_LOG` is not set, then logging is not enabled. /// /// # Safety @@ -27,7 +34,7 @@ pub fn try_init_log_from_env() { } } -/// This is an utility function to enable the tracing formatting subscriber from +/// This is a utility function to enable the tracing formatting subscriber from /// the environment variable. If `RUST_LOG` is not set, then fallback directives are used. 
/// /// # Safety @@ -55,6 +62,79 @@ fn init_env_filter(env_filter: EnvFilter) { let _ = tracing::subscriber::set_global_default(subscriber); } +pub struct LogRecord { + pub target: String, + pub level: tracing::Level, + pub file: Option<&'static str>, + pub line: Option, + pub thread_id: ThreadId, + pub thread_name: Option, + pub message: Option, + pub attributes: Vec<(&'static str, String)>, +} + +#[derive(Clone)] +struct SpanFields(Vec<(&'static str, String)>); + +struct Layer(F); + +impl tracing_subscriber::Layer for Layer +where + S: Subscriber + for<'a> LookupSpan<'a>, + F: Fn(LogRecord) + 'static, +{ + fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).unwrap(); + let mut extensions = span.extensions_mut(); + let mut fields = vec![]; + attrs.record(&mut |field: &Field, value: &dyn fmt::Debug| { + fields.push((field.name(), format!("{value:?}"))) + }); + extensions.insert(SpanFields(fields)); + } + fn on_record(&self, id: &span::Id, values: &span::Record<'_>, ctx: Context<'_, S>) { + let span = ctx.span(id).unwrap(); + let mut extensions = span.extensions_mut(); + let fields = extensions.get_mut::().unwrap(); + values.record(&mut |field: &Field, value: &dyn fmt::Debug| { + fields.0.push((field.name(), format!("{value:?}"))) + }); + } + fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { + let thread = thread::current(); + let mut record = LogRecord { + target: event.metadata().target().into(), + level: *event.metadata().level(), + file: event.metadata().file(), + line: event.metadata().line(), + thread_id: thread.id(), + thread_name: thread.name().map(Into::into), + message: None, + attributes: vec![], + }; + if let Some(scope) = ctx.event_scope(event) { + for span in scope.from_root() { + let extensions = span.extensions(); + let fields = extensions.get::().unwrap(); + record.attributes.extend(fields.0.iter().cloned()); + } + } + event.record(&mut |field: &Field, value: &dyn fmt::Debug| 
{ + if field.name() == "message" { + record.message = Some(format!("{value:?}")); + } else { + record.attributes.push((field.name(), format!("{value:?}"))) + } + }); + self.0(record); + } +} + +pub fn init_log_with_callback(cb: impl Fn(LogRecord) + Send + Sync + 'static) { + let subscriber = tracing_subscriber::registry().with(Layer(cb)); + let _ = tracing::subscriber::set_global_default(subscriber); +} + #[cfg(feature = "test")] // Used to verify memory leaks for valgrind CI. // `EnvFilter` internally uses a static reference that is not cleaned up yielding to false positive in valgrind. From a1c058164f6aefa74392a813cd8ff6fd9a6a0575 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 3 Jun 2024 17:19:09 +0200 Subject: [PATCH 360/598] feat: expose `zenoh::try_init_log_from_env` from `zenoh_util` --- examples/examples/z_alloc_shm.rs | 2 +- examples/examples/z_delete.rs | 2 +- examples/examples/z_forward.rs | 2 +- examples/examples/z_get.rs | 2 +- examples/examples/z_get_liveliness.rs | 2 +- examples/examples/z_get_shm.rs | 2 +- examples/examples/z_info.rs | 2 +- examples/examples/z_liveliness.rs | 2 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_shm.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- examples/examples/z_pull.rs | 2 +- examples/examples/z_put.rs | 2 +- examples/examples/z_put_float.rs | 2 +- examples/examples/z_queryable.rs | 2 +- examples/examples/z_queryable_shm.rs | 2 +- examples/examples/z_scout.rs | 2 +- examples/examples/z_storage.rs | 2 +- examples/examples/z_sub.rs | 2 +- examples/examples/z_sub_liveliness.rs | 2 +- examples/examples/z_sub_shm.rs | 2 +- examples/examples/z_sub_thr.rs | 2 +- zenoh-ext/examples/examples/z_member.rs | 2 +- zenoh-ext/examples/examples/z_pub_cache.rs | 2 +- zenoh-ext/examples/examples/z_query_sub.rs | 2 +- zenoh-ext/examples/examples/z_view_size.rs | 2 
+- zenoh/src/lib.rs | 15 +++++++++------ zenoh/tests/acl.rs | 2 +- zenoh/tests/interceptors.rs | 6 +++--- zenoh/tests/open_time.rs | 22 +++++++++++----------- zenoh/tests/routing.rs | 8 ++++---- zenoh/tests/session.rs | 4 ++-- zenoh/tests/shm.rs | 4 ++-- zenoh/tests/unicity.rs | 4 ++-- 38 files changed, 64 insertions(+), 61 deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index abdbb2e443..4423e0b07a 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -23,7 +23,7 @@ use zenoh::{ #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); run().await.unwrap() } diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 294d1b850a..090aadac48 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index deb82a2a7f..be9df7e2b0 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -19,7 +19,7 @@ use zenoh_ext::SubscriberForward; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, forward) = parse_args(); diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index e04fc8bcf6..d4fc416f9c 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, selector, value, target, timeout) = parse_args(); diff --git a/examples/examples/z_get_liveliness.rs 
b/examples/examples/z_get_liveliness.rs index 150308aea4..53f7abc92a 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, timeout) = parse_args(); diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index bfb9213ab5..942ec0e34e 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -31,7 +31,7 @@ const N: usize = 10; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, selector, mut value, target, timeout) = parse_args(); diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 281532e236..9d3f1bb223 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let config = parse_args(); diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 71b1fe4e4e..7bc8e857fe 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 56ba47b7f5..96454da614 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, warmup, size, n, express) = parse_args(); let session 
= zenoh::open(config).wait().unwrap(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index a88a9f59a6..53a887b4fd 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -26,7 +26,7 @@ use zenoh_examples::CommonArgs; fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, warmup, size, n) = parse_args(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index ecf2aa1643..6a1b8580c7 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -17,7 +17,7 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, express) = parse_args(); diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 6812246cfa..74a9c2898e 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, value, attachment) = parse_args(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 93ce1df553..9c4e64c496 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -28,7 +28,7 @@ const N: usize = 10; #[tokio::main] async fn main() -> Result<(), ZError> { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, path, value) = parse_args(); diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 2d52668ac9..37c9ac93f2 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -24,7 +24,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + 
zenoh::try_init_log_from_env(); let (mut config, sm_size, size) = parse_args(); // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 8ea7226c8a..1a2a9cc177 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -24,7 +24,7 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let args = Args::parse(); let mut prio = Priority::DEFAULT; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 3127e76c14..6716ef8cc5 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, size, interval) = parse_args(); diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index f56fbf2c8c..4fb6e0ca2a 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, value) = parse_args(); diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 234579b8d5..89100b3731 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, value) = parse_args(); diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index eb950766ab..ede3eff635 100644 --- a/examples/examples/z_queryable.rs +++ 
b/examples/examples/z_queryable.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, key_expr, value, complete) = parse_args(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 5cc8e301d3..ec2058c897 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -28,7 +28,7 @@ const N: usize = 10; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, key_expr, value, complete) = parse_args(); diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index f099beae46..1d485991fd 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -16,7 +16,7 @@ use zenoh::{config::WhatAmI, scout, Config}; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); println!("Scouting..."); let receiver = scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 86d73da2bb..83a2dee66d 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -28,7 +28,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, complete) = parse_args(); diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 95cd5f8988..47432cf9cb 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, key_expr) = parse_args(); diff --git 
a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 86420381e1..bb91c9f491 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 04ba8e9753..4cc797d8b4 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -18,7 +18,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, key_expr) = parse_args(); diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index bee15ada2b..0e9f53f36b 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -69,7 +69,7 @@ impl Drop for Stats { fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (mut config, m, n) = parse_args(); diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 5ddd6e3141..90129ca21e 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -19,7 +19,7 @@ use zenoh_ext::group::*; #[tokio::main] async fn main() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let z = Arc::new(zenoh::open(Config::default()).await.unwrap()); let member = Member::new(z.zid().to_string()) .unwrap() diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 56de7b2fbc..0c5a60751b 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -24,7 +24,7 @@ use zenoh_ext_examples::CommonArgs; #[tokio::main] async 
fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, value, history, prefix, complete) = parse_args(); diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index 513ac3ca58..c819a2a831 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -19,7 +19,7 @@ use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, query) = parse_args(); diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index fd8220d506..a38120cfb4 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -20,7 +20,7 @@ use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, group_name, id, size, timeout) = parse_args(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c033c7feee..5446dab78d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -112,12 +112,15 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ); #[doc(inline)] -pub use crate::{ - config::Config, - core::{Error, Result}, - key_expr::{kedefine, keformat, kewrite}, - scouting::scout, - session::{open, Session}, +pub use { + crate::{ + config::Config, + core::{Error, Result}, + key_expr::{kedefine, keformat, kewrite}, + scouting::scout, + session::{open, Session}, + }, + zenoh_util::try_init_log_from_env, }; pub mod prelude; diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index b78a9ac888..31294b1359 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -34,7 +34,7 @@ mod test { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn test_acl() { - zenoh_util::try_init_log_from_env(); + 
zenoh::try_init_log_from_env(); test_pub_sub_deny().await; test_pub_sub_allow().await; test_pub_sub_deny_then_allow().await; diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 32001f1875..3ee2c51828 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -182,7 +182,7 @@ fn downsampling_by_keyexpr_impl(flow: InterceptorFlow) { #[test] fn downsampling_by_keyexpr() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); downsampling_by_keyexpr_impl(InterceptorFlow::Ingress); downsampling_by_keyexpr_impl(InterceptorFlow::Egress); } @@ -235,7 +235,7 @@ fn downsampling_by_interface_impl(flow: InterceptorFlow) { #[cfg(unix)] #[test] fn downsampling_by_interface() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); downsampling_by_interface_impl(InterceptorFlow::Ingress); downsampling_by_interface_impl(InterceptorFlow::Egress); } @@ -243,7 +243,7 @@ fn downsampling_by_interface() { #[test] #[should_panic(expected = "unknown variant `down`")] fn downsampling_config_error_wrong_strategy() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let mut config = Config::default(); config diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs index 87c080bc97..dec41d1558 100644 --- a/zenoh/tests/open_time.rs +++ b/zenoh/tests/open_time.rs @@ -135,7 +135,7 @@ async fn time_lowlatency_open(endpoint: &EndPoint, mode: WhatAmI) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); time_universal_open(&endpoint, WhatAmI::Client).await; } @@ -144,7 +144,7 @@ async fn time_tcp_only_open() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_with_lowlatency_open() { - zenoh_util::try_init_log_from_env(); + 
zenoh::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14100).parse().unwrap(); time_lowlatency_open(&endpoint, WhatAmI::Client).await; } @@ -153,7 +153,7 @@ async fn time_tcp_only_with_lowlatency_open() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14010).parse().unwrap(); time_universal_open(&endpoint, WhatAmI::Client).await; } @@ -162,7 +162,7 @@ async fn time_udp_only_open() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14110).parse().unwrap(); time_lowlatency_open(&endpoint, WhatAmI::Client).await; } @@ -171,7 +171,7 @@ async fn time_udp_only_with_lowlatency_open() { // #[tokio::test(flavor = "multi_thread", worker_threads = 4)] // #[ignore] // async fn time_ws_only_open() { -// zenoh_util::try_init_log_from_env(); +// zenoh::try_init_log_from_env(); // let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14020).parse().unwrap(); // time_universal_open(&endpoint, WhatAmI::Client).await; // } @@ -180,7 +180,7 @@ async fn time_udp_only_with_lowlatency_open() { // #[tokio::test(flavor = "multi_thread", worker_threads = 4)] // #[ignore] // async fn time_ws_only_with_lowlatency_open() { -// zenoh_util::try_init_log_from_env(); +// zenoh::try_init_log_from_env(); // let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14120).parse().unwrap(); // time_lowlatency_open(&endpoint, WhatAmI::Client).await; // } @@ -189,7 +189,7 @@ async fn time_udp_only_with_lowlatency_open() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let endpoint: 
EndPoint = "unixpipe/time_unixpipe_only_open".parse().unwrap(); time_universal_open(&endpoint, WhatAmI::Client).await; } @@ -198,7 +198,7 @@ async fn time_unixpipe_only_open() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only_with_lowlatency_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_open" .parse() .unwrap(); @@ -209,7 +209,7 @@ async fn time_unixpipe_only_with_lowlatency_open() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-9-open.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -224,7 +224,7 @@ async fn time_unix_only_open() { async fn time_tls_only_open() { use zenoh_link::tls::config::*; - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -420,7 +420,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_vsock_only_open() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:18000".parse().unwrap(); time_lowlatency_open(&endpoint, WhatAmI::Client).await; } diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 123ff24201..b72f2d560c 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -362,7 +362,7 @@ impl Recipe { // And the message transmission should work even if the common node disappears after a while. 
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn gossip() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let locator = String::from("tcp/127.0.0.1:17446"); let ke = String::from("testKeyExprGossip"); @@ -430,7 +430,7 @@ async fn gossip() -> Result<()> { // Simulate two peers connecting to a router but not directly reachable to each other can exchange messages via the brokering by the router. #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn static_failover_brokering() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let locator = String::from("tcp/127.0.0.1:17449"); let ke = String::from("testKeyExprStaticFailoverBrokering"); let msg_size = 8; @@ -491,7 +491,7 @@ async fn static_failover_brokering() -> Result<()> { // Total cases = 2 x 4 x 6 = 48 #[tokio::test(flavor = "multi_thread", worker_threads = 9)] async fn three_node_combination() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let modes = [WhatAmI::Peer, WhatAmI::Client]; let delay_in_secs = [ (0, 1, 2), @@ -622,7 +622,7 @@ async fn three_node_combination() -> Result<()> { // Total cases = 2 x 8 = 16 #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn two_node_combination() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); #[derive(Clone, Copy)] struct IsFirstListen(bool); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 5201be24a2..3c22d16b3f 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -245,7 +245,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_unicast() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; test_session_pubsub(&peer01, 
&peer02, Reliability::Reliable).await; test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await; @@ -254,7 +254,7 @@ async fn zenoh_session_unicast() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_multicast() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (peer01, peer02) = open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index a969af4dbe..81e5fdece1 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -182,7 +182,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re fn zenoh_shm_unicast() { tokio::runtime::Runtime::new().unwrap().block_on(async { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await; test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; @@ -194,7 +194,7 @@ fn zenoh_shm_unicast() { fn zenoh_shm_multicast() { tokio::runtime::Runtime::new().unwrap().block_on(async { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (peer01, peer02) = open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 70a70c5dce..35725a1abb 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -257,7 +257,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_unicity_p2p() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (s01, s02, s03) = open_p2p_sessions().await; test_unicity_pubsub(&s01, &s02, &s03).await; @@ -267,7 +267,7 @@ async fn zenoh_unicity_p2p() { #[tokio::test(flavor = "multi_thread", worker_threads = 
4)] async fn zenoh_unicity_brokered() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let r = open_router_session().await; let (s01, s02, s03) = open_client_sessions().await; From fcaf18596bca14d5f65e5e3285918a8dfbb36002 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 18:03:48 +0200 Subject: [PATCH 361/598] keyexpr methods unstable/internal --- commons/zenoh-keyexpr/Cargo.toml | 2 + .../zenoh-keyexpr/src/key_expr/borrowed.rs | 21 ++ .../src/keyexpr_tree/arc_tree.rs | 20 +- .../src/keyexpr_tree/box_tree.rs | 22 +- commons/zenoh-macros/src/lib.rs | 279 ------------------ .../src/replica/storage.rs | 2 +- plugins/zenoh-plugin-trait/Cargo.toml | 2 +- zenoh/Cargo.toml | 4 +- zenohd/src/main.rs | 1 - 9 files changed, 48 insertions(+), 305 deletions(-) diff --git a/commons/zenoh-keyexpr/Cargo.toml b/commons/zenoh-keyexpr/Cargo.toml index 41456af1ec..2f90386e4a 100644 --- a/commons/zenoh-keyexpr/Cargo.toml +++ b/commons/zenoh-keyexpr/Cargo.toml @@ -26,6 +26,8 @@ description = "Internal crate for zenoh." [features] default = ["std"] std = ["zenoh-result/std", "dep:schemars"] +internal = [] +unstable = [] [dependencies] keyed-set = { workspace = true } diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index fd87cef55f..b5632a6cc6 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -126,7 +126,12 @@ impl keyexpr { } /// Returns `true` if `self` contains any wildcard character (`**` or `$*`). 
+ #[cfg(feature = "internal")] + #[cfg(feature = "unstable")] pub fn is_wild(&self) -> bool { + self.is_wild_impl() + } + pub(crate) fn is_wild_impl(&self) -> bool { self.0.contains(super::SINGLE_WILD as char) } @@ -163,7 +168,12 @@ impl keyexpr { /// None, /// keyexpr::new("dem$*").unwrap().get_nonwild_prefix()); /// ``` + #[cfg(feature = "internal")] + #[cfg(feature = "unstable")] pub fn get_nonwild_prefix(&self) -> Option<&keyexpr> { + self.get_nonwild_prefix_impl() + } + fn get_nonwild_prefix_impl(&self) -> Option<&keyexpr> { match self.0.find('*') { Some(i) => match self.0[..i].rfind('/') { Some(j) => unsafe { Some(keyexpr::from_str_unchecked(&self.0[..j])) }, @@ -227,7 +237,12 @@ impl keyexpr { /// keyexpr::new("demo/example/test/**").unwrap().strip_prefix(keyexpr::new("not/a/prefix").unwrap()).is_empty() /// ); /// ``` + #[cfg(feature = "internal")] + #[cfg(feature = "unstable")] pub fn strip_prefix(&self, prefix: &Self) -> Vec<&keyexpr> { + self.strip_prefix_impl(prefix) + } + fn strip_prefix_impl(&self, prefix: &Self) -> Vec<&keyexpr> { let mut result = alloc::vec![]; 'chunks: for i in (0..=self.len()).rev() { if if i == self.len() { @@ -292,7 +307,13 @@ impl keyexpr { pub unsafe fn from_slice_unchecked(s: &[u8]) -> &Self { core::mem::transmute(s) } + + #[cfg(feature = "internal")] + #[cfg(feature = "unstable")] pub const fn chunks(&self) -> Chunks { + self.chunks_impl() + } + pub(crate) const fn chunks_impl(&self) -> Chunks { Chunks { inner: self.as_str(), } diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index e800697bef..ef29ccc0f8 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -148,7 +148,7 @@ where // tags{ketree.arc.node} fn node(&'a self, token: &'a Token, at: &keyexpr) -> Option { let inner = ketree_borrow(&self.inner, token); - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut 
node = inner.children.child_at(chunks.next().unwrap())?; for chunk in chunks { let as_node: &Arc< @@ -166,7 +166,7 @@ where // tags{ketree.arc.node.or_create} fn node_or_create(&'a self, token: &'a mut Token, at: &keyexpr) -> Self::NodeMut { let inner = ketree_borrow_mut(&self.inner, token); - if at.is_wild() { + if at.is_wild_impl() { inner.wildness.set(true); } let inner: &mut KeArcTreeInner = @@ -182,7 +182,7 @@ where token, )) }; - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = inner .children .entry(chunks.next().unwrap()) @@ -258,7 +258,7 @@ where // tags{ketree.arc.intersecting} fn intersecting_nodes(&'a self, token: &'a Token, key: &'a keyexpr) -> Self::Intersection { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Intersection::new(&inner.children, key), token, @@ -287,7 +287,7 @@ where key: &'a keyexpr, ) -> Self::IntersectionMut { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Intersection::new(unsafe { core::mem::transmute(&inner.children) }, key), token, @@ -313,7 +313,7 @@ where // tags{ketree.arc.included} fn included_nodes(&'a self, token: &'a Token, key: &'a keyexpr) -> Self::Inclusion { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Inclusion::new(&inner.children, key), token, @@ -338,7 +338,7 @@ where // tags{ketree.arc.included.mut} fn included_nodes_mut(&'a self, token: &'a mut Token, key: &'a keyexpr) -> Self::InclusionMut { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { unsafe { IterOrOption::Iter(TokenPacker { iter: 
Inclusion::new(core::mem::transmute(&inner.children), key), @@ -366,7 +366,7 @@ where // tags{ketree.arc.including} fn nodes_including(&'a self, token: &'a Token, key: &'a keyexpr) -> Self::Includer { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Includer::new(&inner.children, key), token, @@ -391,7 +391,7 @@ where // tags{ketree.arc.including.mut} fn nodes_including_mut(&'a self, token: &'a mut Token, key: &'a keyexpr) -> Self::IncluderMut { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { unsafe { IterOrOption::Iter(TokenPacker { iter: Includer::new(core::mem::transmute(&inner.children), key), @@ -581,7 +581,7 @@ where }); if predicate(self) && self.children.is_empty() { result = PruneResult::Delete - } else if self.chunk.is_wild() { + } else if self.chunk.is_wild_impl() { result = PruneResult::Wild } result diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index 1e4846e20a..c72047ee03 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -75,7 +75,7 @@ where { type Node = KeyExprTreeNode; fn node(&'a self, at: &keyexpr) -> Option<&'a Self::Node> { - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = self.children.child_at(chunks.next().unwrap())?; for chunk in chunks { node = node.as_node().children.child_at(chunk)?; @@ -94,7 +94,7 @@ where &'a Self::Node, >; fn intersecting_nodes(&'a self, ke: &'a keyexpr) -> Self::Intersection { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { Intersection::new(&self.children, ke).into() } else { let node = self.node(ke); @@ -108,7 +108,7 @@ where &'a Self::Node, >; fn included_nodes(&'a self, ke: 
&'a keyexpr) -> Self::Inclusion { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { Inclusion::new(&self.children, ke).into() } else { let node = self.node(ke); @@ -122,7 +122,7 @@ where &'a Self::Node, >; fn nodes_including(&'a self, ke: &'a keyexpr) -> Self::Includer { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { Includer::new(&self.children, ke).into() } else { let node = self.node(ke); @@ -145,7 +145,7 @@ where > + 'a, { fn node_mut<'b>(&'b mut self, at: &keyexpr) -> Option<&'b mut Self::Node> { - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = self.children.child_at_mut(chunks.next().unwrap())?; for chunk in chunks { node = node.as_node_mut().children.child_at_mut(chunk)?; @@ -169,10 +169,10 @@ where } fn node_mut_or_create<'b>(&'b mut self, at: &keyexpr) -> &'b mut Self::Node { - if at.is_wild() { + if at.is_wild_impl() { self.wildness.set(true); } - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = self .children .entry(chunks.next().unwrap()) @@ -210,7 +210,7 @@ where &'a mut Self::Node, >; fn intersecting_nodes_mut(&'a mut self, ke: &'a keyexpr) -> Self::IntersectionMut { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { IntersectionMut::new(&mut self.children, ke).into() } else { let node = self.node_mut(ke); @@ -223,7 +223,7 @@ where &'a mut Self::Node, >; fn included_nodes_mut(&'a mut self, ke: &'a keyexpr) -> Self::InclusionMut { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { InclusionMut::new(&mut self.children, ke).into() } else { let node = self.node_mut(ke); @@ -236,7 +236,7 @@ where &'a mut Self::Node, >; fn nodes_including_mut(&'a mut self, ke: &'a keyexpr) -> Self::IncluderMut { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { IncluderMut::new(&mut self.children, 
ke).into() } else { let node = self.node_mut(ke); @@ -365,7 +365,7 @@ where }); if predicate(self) && self.children.is_empty() { result = PruneResult::Delete - } else if self.chunk.is_wild() { + } else if self.chunk.is_wild_impl() { result = PruneResult::Wild } result diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 3b3b14cd3d..3649d633af 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -209,285 +209,6 @@ fn is_doc_attribute(attr: &Attribute) -> bool { .is_some_and(|ident| &ident.to_string() == "doc") } -fn keformat_support(source: &str) -> proc_macro2::TokenStream { - let format = match KeFormat::new(&source) { - Ok(format) => format, - Err(e) => panic!("{}", e), - }; - let specs = unsafe { macro_support::specs(&format) }; - let len = specs.len(); - let setters = specs.iter().map(|spec| { - let id = &source[spec.spec_start..(spec.spec_start + spec.id_end as usize)]; - let set_id = quote::format_ident!("{}", id); - quote! { - pub fn #set_id (&mut self, value: S) -> Result<&mut Self, ::zenoh::key_expr::format::FormatSetError> { - match self.0.set(#id, value) { - Ok(_) => Ok(self), - Err(e) => Err(e) - } - } - } - }); - let getters = specs.iter().map(|spec| { - let source = &source[spec.spec_start..spec.spec_end]; - let id = &source[..(spec.id_end as usize)]; - let get_id = quote::format_ident!("{}", id); - let pattern = unsafe { - keyexpr::from_str_unchecked(if spec.pattern_end != u16::MAX { - &source[(spec.id_end as usize + 1)..(spec.spec_start + spec.pattern_end as usize)] - } else { - &source[(spec.id_end as usize + 1)..] - }) - }; - let doc = format!("Get the parsed value for `{id}`.\n\nThis value is guaranteed to be a valid key expression intersecting with `{pattern}`"); - if pattern.as_bytes() == b"**" { - quote! { - #[doc = #doc] - /// Since the pattern is `**`, this may return `None` if the pattern didn't consume any chunks. 
- pub fn #get_id (&self) -> Option<& ::zenoh::key_expr::keyexpr> { - unsafe { - let s =self._0.get(#id).unwrap_unchecked(); - (!s.is_empty()).then(|| ::zenoh::key_expr::keyexpr::from_str_unchecked(s)) - } - } - } - } else { - quote! { - #[doc = #doc] - pub fn #get_id (&self) -> &::zenoh::key_expr::keyexpr { - unsafe {::zenoh::key_expr::keyexpr::from_str_unchecked(self._0.get(#id).unwrap_unchecked())} - } - } - } - }); - let segments = specs.iter().map(|spec| { - let SegmentBuilder { - segment_start, - prefix_end, - spec_start, - id_end, - pattern_end, - spec_end, - segment_end, - } = spec; - quote! { - ::zenoh::key_expr::format::macro_support::SegmentBuilder { - segment_start: #segment_start, - prefix_end: #prefix_end, - spec_start: #spec_start, - id_end: #id_end, - pattern_end: #pattern_end, - spec_end: #spec_end, - segment_end: #segment_end, - }, - } - }); - - let format_doc = format!("The `{source}` format, as a zero-sized-type."); - let formatter_doc = format!("And instance of a formatter for `{source}`."); - - quote! 
{ - use ::zenoh::core::Result as ZResult; - const FORMAT_INNER: ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> = unsafe { - ::zenoh::key_expr::format::macro_support::const_new(#source, [#(#segments)*]) - }; - #[doc = #format_doc] - #[derive(Copy, Clone, Hash)] - pub struct Format; - - #[doc = #formatter_doc] - #[derive(Clone)] - pub struct Formatter(::zenoh::key_expr::format::KeFormatter<'static, [::zenoh::key_expr::format::Segment<'static>; #len]>); - impl ::core::fmt::Debug for Format { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Debug::fmt(&FORMAT_INNER, f) - } - } - impl ::core::fmt::Debug for Formatter { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Debug::fmt(&self.0, f) - } - } - impl ::core::fmt::Display for Format { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - ::core::fmt::Display::fmt(&FORMAT_INNER, f) - } - } - impl ::core::ops::Deref for Format { - type Target = ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]>; - fn deref(&self) -> &Self::Target {&FORMAT_INNER} - } - impl ::core::ops::Deref for Formatter { - type Target = ::zenoh::key_expr::format::KeFormatter<'static, [::zenoh::key_expr::format::Segment<'static>; #len]>; - fn deref(&self) -> &Self::Target {&self.0} - } - impl ::core::ops::DerefMut for Formatter { - fn deref_mut(&mut self) -> &mut Self::Target {&mut self.0} - } - impl Formatter { - #(#setters)* - } - pub struct Parsed<'s>{_0: ::zenoh::key_expr::format::Parsed<'s, [::zenoh::key_expr::format::Segment<'s>; #len]>} - impl<'s> ::core::ops::Deref for Parsed<'s> { - type Target = ::zenoh::key_expr::format::Parsed<'s, [::zenoh::key_expr::format::Segment<'s>; #len]>; - fn deref(&self) -> &Self::Target {&self._0} - } - impl Parsed<'_> { - #(#getters)* - } - impl Format { - pub fn formatter() -> Formatter { - 
Formatter(Format.formatter()) - } - pub fn parse<'s>(target: &'s ::zenoh::key_expr::keyexpr) -> ZResult> { - Ok(Parsed{_0: Format.parse(target)?}) - } - pub fn into_inner(self) -> ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> { - FORMAT_INNER - } - } - pub fn formatter() -> Formatter { - Format::formatter() - } - pub fn parse<'s>(target: &'s ::zenoh::key_expr::keyexpr) -> ZResult> { - Format::parse(target) - } - } -} - -struct FormatDeclaration { - vis: syn::Visibility, - name: syn::Ident, - lit: syn::LitStr, -} -impl syn::parse::Parse for FormatDeclaration { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - let vis = input.parse()?; - let name = input.parse()?; - let _: syn::Token!(:) = input.parse()?; - let lit = input.parse()?; - Ok(FormatDeclaration { vis, name, lit }) - } -} -struct FormatDeclarations(syn::punctuated::Punctuated); -impl syn::parse::Parse for FormatDeclarations { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - Ok(Self(input.parse_terminated( - FormatDeclaration::parse, - syn::Token![,], - )?)) - } -} - -/// Create format modules from a format specification. -/// -/// `kedefine!($($vis $ident: $lit),*)` will validate each `$lit` to be a valid KeFormat, and declare a module called `$ident` with `$vis` visibility at its call-site for each format. -/// The modules contain the following elements: -/// - `Format`, a zero-sized type that represents your format. -/// - `formatter()`, a function that constructs a `Formatter` specialized for your format: -/// - for every spec in your format, `Formatter` will have a method named after the spec's `id` that lets you set a value for that field of your format. These methods will return `Result<&mut Formatter, FormatError>`. -/// - `parse(target: &keyexpr) -> ZResult>` will parse the provided key expression according to your format. 
Just like `KeFormat::parse`, parsing is lazy: each field will match the smallest subsection of your `target` that is included in its pattern. -/// - like `Formatter`, `Parsed` will have a method named after each spec's `id` that returns `&keyexpr`; except for specs whose pattern was `**`, these will return an `Option<&keyexpr>`, where `None` signifies that the pattern was matched by an empty list of chunks. -#[proc_macro] -pub fn kedefine(tokens: TokenStream) -> TokenStream { - let declarations: FormatDeclarations = syn::parse(tokens).unwrap(); - let content = declarations.0.into_iter().map(|FormatDeclaration { vis, name, lit }| - { - let source = lit.value(); - let docstring = format!( - r"The module associated with the `{source}` format, it contains: -- `Format`, a zero-sized type that represents your format. -- `formatter()`, a function that constructs a `Formatter` specialized for your format: - - for every spec in your format, `Formatter` will have a method named after the spec's `id` that lets you set a value for that field of your format. These methods will return `Result<&mut Formatter, FormatError>`. -- `parse(target: &keyexpr) -> ZResult>` will parse the provided key expression according to your format. Just like `KeFormat::parse`, parsing is lazy: each field will match the smallest subsection of your `target` that is included in its pattern. - - like `Formatter`, `Parsed` will have a method named after each spec's `id` that returns `&keyexpr`; except for specs whose pattern was `**`, these will return an `Option<&keyexpr>`, where `None` signifies that the pattern was matched by an empty list of chunks." - ); - let support = keformat_support(&source); - quote! 
{ - #[doc = #docstring] - #vis mod #name{ - #support - } - }}); - quote!(#(#content)*).into() -} - -struct FormatUsage { - id: syn::Expr, - assigns: Vec<(syn::Expr, syn::Expr)>, -} -impl syn::parse::Parse for FormatUsage { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - let id = input.parse()?; - let mut assigns = Vec::new(); - if !input.is_empty() { - input.parse::()?; - } - assigns.extend( - input - .parse_terminated(syn::Expr::parse, syn::Token![,])? - .into_iter() - .map(|a| match a { - syn::Expr::Assign(a) => (*a.left, *a.right), - a => (a.clone(), a), - }), - ); - Ok(FormatUsage { id, assigns }) - } -} - -/// Write a set of values into a `Formatter`, stopping as soon as a value doesn't fit the specification for its field. -/// Contrary to `keformat` doesn't build the Formatter into a Key Expression. -/// -/// `kewrite!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. -/// `$formatter` must be an expression that dereferences to `&mut Formatter`. -/// `$expr` must resolve to a value that implements `core::fmt::Display`. -/// `$expr` defaults to `$ident` if omitted. -/// -/// This macro always results in an expression that resolves to `Result<&mut Formatter, FormatSetError>`. -#[proc_macro] -pub fn kewrite(tokens: TokenStream) -> TokenStream { - let FormatUsage { id, assigns } = syn::parse(tokens).unwrap(); - let mut sets = None; - for (l, r) in assigns.iter().rev() { - if let Some(set) = sets { - sets = Some(quote!(.#l(#r).and_then(|x| x #set))); - } else { - sets = Some(quote!(.#l(#r))); - } - } - quote!(#id #sets).into() -} - -/// Write a set of values into a `Formatter` and then builds it into an `OwnedKeyExpr`, stopping as soon as a value doesn't fit the specification for its field. -/// -/// `keformat!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. 
-/// `$formatter` must be an expression that dereferences to `&mut Formatter`. -/// `$expr` must resolve to a value that implements `core::fmt::Display`. -/// `$expr` defaults to `$ident` if omitted. -/// -/// This macro always results in an expression that resolves to `ZResult`, and leaves `$formatter` in its written state. -#[proc_macro] -pub fn keformat(tokens: TokenStream) -> TokenStream { - let formatted: proc_macro2::TokenStream = kewrite(tokens).into(); - quote!(match #formatted { - Ok(ok) => ok.build(), - Err(e) => Err(e.into()), - }) - .into() -} - -/// Equivalent to [`keyexpr::new`](zenoh_keyexpr::keyexpr::new), but the check is run at compile-time and will throw a compile error in case of failure. -#[proc_macro] -pub fn ke(tokens: TokenStream) -> TokenStream { - let value: LitStr = syn::parse(tokens).unwrap(); - let ke = value.value(); - match zenoh_keyexpr::keyexpr::new(&ke) { - Ok(_) => quote!(unsafe {::zenoh::key_expr::keyexpr::from_str_unchecked(#ke)}).into(), - Err(e) => panic!("{}", e), - } -} - mod zenoh_runtime_derive; use syn::DeriveInput; use zenoh_runtime_derive::{derive_generic_runtime_param, derive_register_param}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 4555498ddd..a915510c63 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -495,7 +495,7 @@ impl StorageService { true } - async fn reply_query(&self, query: Result) { + async fn reply_query(&self, query: Result) { let q = match query { Ok(q) => q, Err(e) => { diff --git a/plugins/zenoh-plugin-trait/Cargo.toml b/plugins/zenoh-plugin-trait/Cargo.toml index f78967fe3d..b184f5f4e9 100644 --- a/plugins/zenoh-plugin-trait/Cargo.toml +++ b/plugins/zenoh-plugin-trait/Cargo.toml @@ -34,5 +34,5 @@ serde_json = { workspace = true } zenoh-macros = { workspace = true } zenoh-result = { workspace = true } zenoh-util 
= { workspace = true } -zenoh-keyexpr = { workspace = true } +zenoh-keyexpr = { workspace = true, features = ["internal", "unstable"] } const_format = { workspace = true } \ No newline at end of file diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 6e9a0caf1c..bfb7e3525a 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -50,8 +50,8 @@ transport_udp = ["zenoh-transport/transport_udp"] transport_unixsock-stream = ["zenoh-transport/transport_unixsock-stream"] transport_ws = ["zenoh-transport/transport_ws"] transport_vsock = ["zenoh-transport/transport_vsock"] -unstable = [] -internal = [] +unstable = ["zenoh-keyexpr/unstable"] +internal = ["zenoh-keyexpr/internal"] default = [ "auth_pubkey", "auth_usrpwd", diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 4cd9acfc32..123f6fc656 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -18,7 +18,6 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilte #[cfg(feature = "loki")] use url::Url; use zenoh::{ - config::WhatAmI, config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap, WhatAmI}, core::Result, }; From ec687cae4d95f0ad182ca156cadf7263ad6de790 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 3 Jun 2024 18:06:21 +0200 Subject: [PATCH 362/598] fix: deprecate `SyncResolve`/`AsyncResolve` methods (#1080) I forgot methods in deprecation --- commons/zenoh-core/src/lib.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index 8d6fbfcc0a..434d0e6740 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -58,8 +58,12 @@ pub trait Wait: Resolvable { pub trait AsyncResolve: Resolvable { type Future: Future + Send; + #[allow(deprecated)] + #[deprecated = "use `.await` directly instead"] fn res_async(self) -> Self::Future; + #[allow(deprecated)] + #[deprecated = "use `.wait()` instead`"] fn res(self) -> Self::Future where Self: Sized, @@ -83,8 +87,11 @@ 
where #[deprecated = "use `.wait()` instead`"] pub trait SyncResolve: Resolvable { + #[deprecated = "use `.wait()` instead`"] fn res_sync(self) -> Self::To; + #[allow(deprecated)] + #[deprecated = "use `.wait()` instead`"] fn res(self) -> Self::To where Self: Sized, From eea9d741e561c12daadba9372924ea256b365cd4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 18:12:25 +0200 Subject: [PATCH 363/598] lost code restored --- commons/zenoh-macros/src/lib.rs | 279 ++++++++++++++++++ .../src/replica/storage.rs | 2 +- zenoh/src/api/bytes.rs | 2 +- zenoh/src/api/session.rs | 4 +- 4 files changed, 284 insertions(+), 3 deletions(-) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 3649d633af..3b3b14cd3d 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -209,6 +209,285 @@ fn is_doc_attribute(attr: &Attribute) -> bool { .is_some_and(|ident| &ident.to_string() == "doc") } +fn keformat_support(source: &str) -> proc_macro2::TokenStream { + let format = match KeFormat::new(&source) { + Ok(format) => format, + Err(e) => panic!("{}", e), + }; + let specs = unsafe { macro_support::specs(&format) }; + let len = specs.len(); + let setters = specs.iter().map(|spec| { + let id = &source[spec.spec_start..(spec.spec_start + spec.id_end as usize)]; + let set_id = quote::format_ident!("{}", id); + quote! 
{ + pub fn #set_id (&mut self, value: S) -> Result<&mut Self, ::zenoh::key_expr::format::FormatSetError> { + match self.0.set(#id, value) { + Ok(_) => Ok(self), + Err(e) => Err(e) + } + } + } + }); + let getters = specs.iter().map(|spec| { + let source = &source[spec.spec_start..spec.spec_end]; + let id = &source[..(spec.id_end as usize)]; + let get_id = quote::format_ident!("{}", id); + let pattern = unsafe { + keyexpr::from_str_unchecked(if spec.pattern_end != u16::MAX { + &source[(spec.id_end as usize + 1)..(spec.spec_start + spec.pattern_end as usize)] + } else { + &source[(spec.id_end as usize + 1)..] + }) + }; + let doc = format!("Get the parsed value for `{id}`.\n\nThis value is guaranteed to be a valid key expression intersecting with `{pattern}`"); + if pattern.as_bytes() == b"**" { + quote! { + #[doc = #doc] + /// Since the pattern is `**`, this may return `None` if the pattern didn't consume any chunks. + pub fn #get_id (&self) -> Option<& ::zenoh::key_expr::keyexpr> { + unsafe { + let s =self._0.get(#id).unwrap_unchecked(); + (!s.is_empty()).then(|| ::zenoh::key_expr::keyexpr::from_str_unchecked(s)) + } + } + } + } else { + quote! { + #[doc = #doc] + pub fn #get_id (&self) -> &::zenoh::key_expr::keyexpr { + unsafe {::zenoh::key_expr::keyexpr::from_str_unchecked(self._0.get(#id).unwrap_unchecked())} + } + } + } + }); + let segments = specs.iter().map(|spec| { + let SegmentBuilder { + segment_start, + prefix_end, + spec_start, + id_end, + pattern_end, + spec_end, + segment_end, + } = spec; + quote! { + ::zenoh::key_expr::format::macro_support::SegmentBuilder { + segment_start: #segment_start, + prefix_end: #prefix_end, + spec_start: #spec_start, + id_end: #id_end, + pattern_end: #pattern_end, + spec_end: #spec_end, + segment_end: #segment_end, + }, + } + }); + + let format_doc = format!("The `{source}` format, as a zero-sized-type."); + let formatter_doc = format!("And instance of a formatter for `{source}`."); + + quote! 
{ + use ::zenoh::core::Result as ZResult; + const FORMAT_INNER: ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> = unsafe { + ::zenoh::key_expr::format::macro_support::const_new(#source, [#(#segments)*]) + }; + #[doc = #format_doc] + #[derive(Copy, Clone, Hash)] + pub struct Format; + + #[doc = #formatter_doc] + #[derive(Clone)] + pub struct Formatter(::zenoh::key_expr::format::KeFormatter<'static, [::zenoh::key_expr::format::Segment<'static>; #len]>); + impl ::core::fmt::Debug for Format { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + ::core::fmt::Debug::fmt(&FORMAT_INNER, f) + } + } + impl ::core::fmt::Debug for Formatter { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + ::core::fmt::Debug::fmt(&self.0, f) + } + } + impl ::core::fmt::Display for Format { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + ::core::fmt::Display::fmt(&FORMAT_INNER, f) + } + } + impl ::core::ops::Deref for Format { + type Target = ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]>; + fn deref(&self) -> &Self::Target {&FORMAT_INNER} + } + impl ::core::ops::Deref for Formatter { + type Target = ::zenoh::key_expr::format::KeFormatter<'static, [::zenoh::key_expr::format::Segment<'static>; #len]>; + fn deref(&self) -> &Self::Target {&self.0} + } + impl ::core::ops::DerefMut for Formatter { + fn deref_mut(&mut self) -> &mut Self::Target {&mut self.0} + } + impl Formatter { + #(#setters)* + } + pub struct Parsed<'s>{_0: ::zenoh::key_expr::format::Parsed<'s, [::zenoh::key_expr::format::Segment<'s>; #len]>} + impl<'s> ::core::ops::Deref for Parsed<'s> { + type Target = ::zenoh::key_expr::format::Parsed<'s, [::zenoh::key_expr::format::Segment<'s>; #len]>; + fn deref(&self) -> &Self::Target {&self._0} + } + impl Parsed<'_> { + #(#getters)* + } + impl Format { + pub fn formatter() -> Formatter { + 
Formatter(Format.formatter()) + } + pub fn parse<'s>(target: &'s ::zenoh::key_expr::keyexpr) -> ZResult> { + Ok(Parsed{_0: Format.parse(target)?}) + } + pub fn into_inner(self) -> ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> { + FORMAT_INNER + } + } + pub fn formatter() -> Formatter { + Format::formatter() + } + pub fn parse<'s>(target: &'s ::zenoh::key_expr::keyexpr) -> ZResult> { + Format::parse(target) + } + } +} + +struct FormatDeclaration { + vis: syn::Visibility, + name: syn::Ident, + lit: syn::LitStr, +} +impl syn::parse::Parse for FormatDeclaration { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let vis = input.parse()?; + let name = input.parse()?; + let _: syn::Token!(:) = input.parse()?; + let lit = input.parse()?; + Ok(FormatDeclaration { vis, name, lit }) + } +} +struct FormatDeclarations(syn::punctuated::Punctuated); +impl syn::parse::Parse for FormatDeclarations { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + Ok(Self(input.parse_terminated( + FormatDeclaration::parse, + syn::Token![,], + )?)) + } +} + +/// Create format modules from a format specification. +/// +/// `kedefine!($($vis $ident: $lit),*)` will validate each `$lit` to be a valid KeFormat, and declare a module called `$ident` with `$vis` visibility at its call-site for each format. +/// The modules contain the following elements: +/// - `Format`, a zero-sized type that represents your format. +/// - `formatter()`, a function that constructs a `Formatter` specialized for your format: +/// - for every spec in your format, `Formatter` will have a method named after the spec's `id` that lets you set a value for that field of your format. These methods will return `Result<&mut Formatter, FormatError>`. +/// - `parse(target: &keyexpr) -> ZResult>` will parse the provided key expression according to your format. 
Just like `KeFormat::parse`, parsing is lazy: each field will match the smallest subsection of your `target` that is included in its pattern. +/// - like `Formatter`, `Parsed` will have a method named after each spec's `id` that returns `&keyexpr`; except for specs whose pattern was `**`, these will return an `Option<&keyexpr>`, where `None` signifies that the pattern was matched by an empty list of chunks. +#[proc_macro] +pub fn kedefine(tokens: TokenStream) -> TokenStream { + let declarations: FormatDeclarations = syn::parse(tokens).unwrap(); + let content = declarations.0.into_iter().map(|FormatDeclaration { vis, name, lit }| + { + let source = lit.value(); + let docstring = format!( + r"The module associated with the `{source}` format, it contains: +- `Format`, a zero-sized type that represents your format. +- `formatter()`, a function that constructs a `Formatter` specialized for your format: + - for every spec in your format, `Formatter` will have a method named after the spec's `id` that lets you set a value for that field of your format. These methods will return `Result<&mut Formatter, FormatError>`. +- `parse(target: &keyexpr) -> ZResult>` will parse the provided key expression according to your format. Just like `KeFormat::parse`, parsing is lazy: each field will match the smallest subsection of your `target` that is included in its pattern. + - like `Formatter`, `Parsed` will have a method named after each spec's `id` that returns `&keyexpr`; except for specs whose pattern was `**`, these will return an `Option<&keyexpr>`, where `None` signifies that the pattern was matched by an empty list of chunks." + ); + let support = keformat_support(&source); + quote! 
{ + #[doc = #docstring] + #vis mod #name{ + #support + } + }}); + quote!(#(#content)*).into() +} + +struct FormatUsage { + id: syn::Expr, + assigns: Vec<(syn::Expr, syn::Expr)>, +} +impl syn::parse::Parse for FormatUsage { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let id = input.parse()?; + let mut assigns = Vec::new(); + if !input.is_empty() { + input.parse::()?; + } + assigns.extend( + input + .parse_terminated(syn::Expr::parse, syn::Token![,])? + .into_iter() + .map(|a| match a { + syn::Expr::Assign(a) => (*a.left, *a.right), + a => (a.clone(), a), + }), + ); + Ok(FormatUsage { id, assigns }) + } +} + +/// Write a set of values into a `Formatter`, stopping as soon as a value doesn't fit the specification for its field. +/// Contrary to `keformat` doesn't build the Formatter into a Key Expression. +/// +/// `kewrite!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. +/// `$formatter` must be an expression that dereferences to `&mut Formatter`. +/// `$expr` must resolve to a value that implements `core::fmt::Display`. +/// `$expr` defaults to `$ident` if omitted. +/// +/// This macro always results in an expression that resolves to `Result<&mut Formatter, FormatSetError>`. +#[proc_macro] +pub fn kewrite(tokens: TokenStream) -> TokenStream { + let FormatUsage { id, assigns } = syn::parse(tokens).unwrap(); + let mut sets = None; + for (l, r) in assigns.iter().rev() { + if let Some(set) = sets { + sets = Some(quote!(.#l(#r).and_then(|x| x #set))); + } else { + sets = Some(quote!(.#l(#r))); + } + } + quote!(#id #sets).into() +} + +/// Write a set of values into a `Formatter` and then builds it into an `OwnedKeyExpr`, stopping as soon as a value doesn't fit the specification for its field. +/// +/// `keformat!($formatter, $($ident [= $expr]),*)` will attempt to write `$expr` into their respective `$ident` fields for `$formatter`. 
+/// `$formatter` must be an expression that dereferences to `&mut Formatter`. +/// `$expr` must resolve to a value that implements `core::fmt::Display`. +/// `$expr` defaults to `$ident` if omitted. +/// +/// This macro always results in an expression that resolves to `ZResult`, and leaves `$formatter` in its written state. +#[proc_macro] +pub fn keformat(tokens: TokenStream) -> TokenStream { + let formatted: proc_macro2::TokenStream = kewrite(tokens).into(); + quote!(match #formatted { + Ok(ok) => ok.build(), + Err(e) => Err(e.into()), + }) + .into() +} + +/// Equivalent to [`keyexpr::new`](zenoh_keyexpr::keyexpr::new), but the check is run at compile-time and will throw a compile error in case of failure. +#[proc_macro] +pub fn ke(tokens: TokenStream) -> TokenStream { + let value: LitStr = syn::parse(tokens).unwrap(); + let ke = value.value(); + match zenoh_keyexpr::keyexpr::new(&ke) { + Ok(_) => quote!(unsafe {::zenoh::key_expr::keyexpr::from_str_unchecked(#ke)}).into(), + Err(e) => panic!("{}", e), + } +} + mod zenoh_runtime_derive; use syn::DeriveInput; use zenoh_runtime_derive::{derive_generic_runtime_param, derive_register_param}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index a915510c63..c99b322a12 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,6 +21,7 @@ use async_std::sync::{Arc, Mutex, RwLock}; use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; +use zenoh::internal::buffers::{SplitBuffer, ZBuf}; use zenoh::{ core::Result as ZResult, internal::{bail, zenoh_home, Timed, TimedEvent, Timer}, @@ -37,7 +38,6 @@ use zenoh::{ time::{new_reception_timestamp, Timestamp, NTP64}, value::Value, }; -use zenoh::internal::buffers::{SplitBuffer, ZBuf}; use zenoh_backend_traits::{ config::{GarbageCollectionConfig, StorageConfig}, Capability, History, 
Persistence, StorageInsertionResult, StoredData, diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 0ee1a4557d..446b2bc270 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -23,7 +23,7 @@ use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::HasWriter, - ZBufReader, ZBufWriter, ZSlice, ZBuf, + ZBuf, ZBufReader, ZBufWriter, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a483a7ac81..e633bd88b8 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -72,7 +72,9 @@ use super::{ info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, publisher::{Priority, PublisherState}, - query::{ConsolidationMode, SessionGetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + query::{ + ConsolidationMode, QueryConsolidation, QueryState, QueryTarget, Reply, SessionGetBuilder, + }, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, From 73bf9cb421292b19db23aff7aa3c0ec0d2f67f1a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 3 Jun 2024 19:32:11 +0200 Subject: [PATCH 364/598] wrapped zenohid for hide methods --- commons/zenoh-config/src/lib.rs | 43 +++++++++++++++---- io/zenoh-transport/src/manager.rs | 2 +- .../src/unicast/establishment/accept.rs | 4 +- zenoh/src/api/info.rs | 7 +-- zenoh/src/api/publisher.rs | 17 +++++--- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/session.rs | 12 +++--- zenoh/src/api/subscriber.rs | 2 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 3 +- zenoh/src/net/routing/hat/mod.rs | 3 +- zenoh/src/net/routing/hat/router/mod.rs | 3 +- .../net/routing/interceptor/access_control.rs | 3 +- zenoh/src/net/runtime/adminspace.rs | 6 +-- zenoh/src/net/runtime/mod.rs | 7 +-- 14 files changed, 76 
insertions(+), 38 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index e57660800f..e2713104c6 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -19,13 +19,7 @@ mod include; #[allow(unused_imports)] use std::convert::TryFrom; // This is a false positive from the rust analyser use std::{ - any::Any, - collections::HashSet, - fmt, - io::Read, - net::SocketAddr, - path::Path, - sync::{Arc, Mutex, MutexGuard, Weak}, + any::Any, collections::HashSet, fmt, io::Read, net::SocketAddr, path::Path, str::FromStr, sync::{Arc, Mutex, MutexGuard, Weak} }; use include::recursive_include; @@ -36,7 +30,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, @@ -51,6 +45,39 @@ pub use mode_dependent::*; pub mod connection_retry; pub use connection_retry::*; +/// The global unique id of a zenoh peer. 
+#[derive( + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default +)] +#[repr(transparent)] +pub struct ZenohId(zenoh_protocol::core::ZenohId); + +impl fmt::Display for ZenohId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl From for ZenohId { + fn from(id: zenoh_protocol::core::ZenohId) -> Self { + Self(id) + } +} + +impl From for zenoh_protocol::core::ZenohId { + fn from(id: ZenohId) -> Self { + id.0 + } +} + +impl FromStr for ZenohId { + type Err = zenoh_result::Error; + + fn from_str(s: &str) -> Result { + zenoh_protocol::core::ZenohId::from_str(s).map(|zid| zid.into()) + } +} + // Wrappers for secrecy of values #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] pub struct SecretString(String); diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 3f57b3ceae..7d5e8f0885 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -221,7 +221,7 @@ impl TransportManagerBuilder { } pub async fn from_config(mut self, config: &Config) -> ZResult { - self = self.zid(*config.id()); + self = self.zid((*config.id()).into()); if let Some(v) = config.mode() { self = self.whatami(*v); } diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index d074ea9642..d53374b5db 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -696,7 +696,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - let iack_in = SendInitAckIn { mine_version: manager.config.version, - mine_zid: manager.config.zid, + mine_zid: manager.config.zid.into(), mine_whatami: manager.config.whatami, other_zid: isyn_out.other_zid, other_whatami: isyn_out.other_whatami, @@ -714,7 +714,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - // Create the 
OpenAck but not send it yet let oack_in = SendOpenAckIn { - mine_zid: manager.config.zid, + mine_zid: manager.config.zid.into(), mine_lease: manager.config.unicast.lease, other_zid: osyn_out.other_zid, }; diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 6409760a72..4016397c0e 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -15,8 +15,9 @@ //! Tools to access information about the current zenoh [`Session`](crate::Session). use std::future::{IntoFuture, Ready}; +use zenoh_config::ZenohId; use zenoh_core::{Resolvable, Wait}; -use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_protocol::core::WhatAmI; use super::session::SessionRef; @@ -93,7 +94,7 @@ impl<'a> Wait for RoutersZenohIdBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Router).then_some(())) - .and_then(|_| s.get_zid().ok()) + .and_then(|_| s.get_zid().map(Into::::into).ok()) }), ) } @@ -143,7 +144,7 @@ impl<'a> Wait for PeersZenohIdBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Peer).then_some(())) - .and_then(|_| s.get_zid().ok()) + .and_then(|_| s.get_zid().map(|zid| zid.into()) .ok()) }), ) } diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 2432c0ebee..9287d7d508 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -155,7 +155,7 @@ impl<'a> Publisher<'a> { #[zenoh_macros::unstable] pub fn id(&self) -> EntityGlobalId { EntityGlobalId { - zid: self.session.zid(), + zid: self.session.zid().into(), eid: self.id, } } @@ -661,11 +661,16 @@ impl Priority { /// Default pub const DEFAULT: Self = Self::Data; /// The lowest Priority - pub const MIN: Self = Self::Background; + #[zenoh_macros::internal] + pub const MIN: Self = Self::MIN_; + const MIN_: Self = Self::Background; /// The highest Priority - pub const MAX: Self = Self::RealTime; + #[zenoh_macros::internal] + pub const MAX: Self = Self::MAX_; + const MAX_: Self = Self::RealTime; /// The number of available priorities - pub 
const NUM: usize = 1 + Self::MIN as usize - Self::MAX as usize; + #[zenoh_macros::internal] + pub const NUM: usize = 1 + Self::MIN_ as usize - Self::MAX_ as usize; } impl TryFrom for Priority { @@ -691,8 +696,8 @@ impl TryFrom for Priority { unknown => bail!( "{} is not a valid priority value. Admitted values are: [{}-{}].", unknown, - Self::MAX as u8, - Self::MIN as u8 + Self::MAX_ as u8, + Self::MIN_ as u8 ), } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 8ad7d09f7b..1959dc0d77 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -894,7 +894,7 @@ impl<'a, Handler> Queryable<'a, Handler> { #[zenoh_macros::unstable] pub fn id(&self) -> EntityGlobalId { EntityGlobalId { - zid: self.queryable.session.zid(), + zid: self.queryable.session.zid().into(), eid: self.queryable.state.id, } } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index e633bd88b8..4456c0f42f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -28,14 +28,14 @@ use tracing::{error, trace, warn}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; -use zenoh_config::{unwrap_or_default, Config, Notifier}; +use zenoh_config::{unwrap_or_default, Config, Notifier, ZenohId}; use zenoh_core::{zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, Wait}; #[cfg(feature = "unstable")] use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, ZenohId, + AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, EMPTY_EXPR_ID, }, network::{ @@ -1711,7 +1711,7 @@ impl Session { } (query.callback)(Reply { result: Err("Timeout".into()), - replier_id: zid, + replier_id: zid.into(), }); } } @@ -1860,7 +1860,7 @@ impl Session { key_expr, parameters: parameters.to_owned().into(), qid, - zid, + zid: zid.into(), primitives: if local { 
Arc::new(self.clone()) } else { @@ -2227,7 +2227,7 @@ impl Primitives for Session { }; let replier_id = match e.ext_sinfo { Some(info) => info.id.zid, - None => ZenohId::rand(), + None => zenoh_protocol::core::ZenohId::rand().into(), }; let new_reply = Reply { replier_id, @@ -2354,7 +2354,7 @@ impl Primitives for Session { ); let new_reply = Reply { result: Ok(sample), - replier_id: ZenohId::rand(), // TODO + replier_id: zenoh_protocol::core::ZenohId::rand().into(), // TODO }; let callback = match query.reception_mode { diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 5235ad4917..e298e3c9c9 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -459,7 +459,7 @@ impl<'a, Handler> Subscriber<'a, Handler> { #[zenoh_macros::unstable] pub fn id(&self) -> EntityGlobalId { EntityGlobalId { - zid: self.subscriber.session.zid(), + zid: self.subscriber.session.zid().into(), eid: self.subscriber.state.id, } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 82c2a96166..e42c7b88a4 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -24,8 +24,9 @@ use std::{ time::Duration, }; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ + core::ZenohId, common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 3ab346ed7d..17d7fbab57 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -20,8 +20,9 @@ use std::{any::Any, collections::HashMap, sync::Arc}; use zenoh_buffers::ZBuf; -use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; +use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use 
zenoh_protocol::{ + core::ZenohId, core::WireExpr, network::{ declare::{ diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index dd7c6e11c7..9a65ebf8e4 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -25,8 +25,9 @@ use std::{ time::Duration, }; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ + core::ZenohId, common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index fe78ce8aed..6467edd13e 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -20,8 +20,9 @@ use std::{any::Any, sync::Arc}; -use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; +use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject}; use zenoh_protocol::{ + core::ZenohId, network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, zenoh::{PushBody, RequestBody}, }; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index f59a17d0d0..dcc5af05d6 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -19,14 +19,14 @@ use std::{ use serde_json::json; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; -use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI, ZenohId}; use zenoh_core::Wait; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] use 
zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID}, + core::{key_expr::OwnedKeyExpr, ExprId, WireExpr, EMPTY_EXPR_ID}, network::{ declare::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, @@ -467,7 +467,7 @@ impl Primitives for AdminSpace { key_expr: key_expr.clone(), parameters: query.parameters.into(), qid: msg.id, - zid, + zid: zid.into(), primitives, }), eid: self.queryable_id, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 515f3f54be..82fd943f2b 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -36,10 +36,11 @@ use futures::{stream::StreamExt, Future}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; +use zenoh_config::ZenohId; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::{ - core::{Locator, WhatAmI, ZenohId}, + core::{Locator, WhatAmI}, network::NetworkMessage, }; use zenoh_result::{bail, ZResult}; @@ -133,7 +134,7 @@ impl RuntimeBuilder { } = self; tracing::debug!("Zenoh Rust API {}", GIT_VERSION); - let zid = *config.id(); + let zid = (*config.id()).into(); tracing::info!("Using ZID: {}", zid); let whatami = unwrap_or_default!(config.mode()); @@ -175,7 +176,7 @@ impl RuntimeBuilder { let config = Notifier::new(config); let runtime = Runtime { state: Arc::new(RuntimeState { - zid, + zid: zid.into(), whatami, next_id: AtomicU32::new(1), // 0 is reserved for routing core metadata, From 057a842b9ef39f0e93a0053d32a738d88182908d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 4 Jun 2024 16:49:09 +0200 Subject: [PATCH 365/598] cargo fmt --- commons/zenoh-config/src/lib.rs | 11 +++++++++-- zenoh/src/api/info.rs | 2 +- zenoh/src/api/session.rs | 3 +-- zenoh/src/net/routing/hat/linkstate_peer/mod.rs | 2 +- zenoh/src/net/routing/hat/mod.rs | 2 +- 
zenoh/src/net/routing/hat/router/mod.rs | 2 +- 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index aa1a536347..2b141a0951 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -19,7 +19,14 @@ mod include; #[allow(unused_imports)] use std::convert::TryFrom; // This is a false positive from the rust analyser use std::{ - any::Any, collections::HashSet, fmt, io::Read, net::SocketAddr, path::Path, str::FromStr, sync::{Arc, Mutex, MutexGuard, Weak} + any::Any, + collections::HashSet, + fmt, + io::Read, + net::SocketAddr, + path::Path, + str::FromStr, + sync::{Arc, Mutex, MutexGuard, Weak}, }; use include::recursive_include; @@ -47,7 +54,7 @@ pub use connection_retry::*; /// The global unique id of a zenoh peer. #[derive( - Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default, )] #[repr(transparent)] pub struct ZenohId(zenoh_protocol::core::ZenohId); diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 4016397c0e..f66ade913e 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -144,7 +144,7 @@ impl<'a> Wait for PeersZenohIdBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Peer).then_some(())) - .and_then(|_| s.get_zid().map(|zid| zid.into()) .ok()) + .and_then(|_| s.get_zid().map(|zid| zid.into()).ok()) }), ) } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 4456c0f42f..46dd37956e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -35,8 +35,7 @@ use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, - EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, 
EMPTY_EXPR_ID, }, network::{ self, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index e42c7b88a4..658c394b9a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -26,8 +26,8 @@ use std::{ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ - core::ZenohId, common::ZExtBody, + core::ZenohId, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, interest::InterestId, diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 17d7fbab57..c34a4c58ca 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -22,8 +22,8 @@ use std::{any::Any, collections::HashMap, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ - core::ZenohId, core::WireExpr, + core::ZenohId, network::{ declare::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 9a65ebf8e4..301b300498 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -27,8 +27,8 @@ use std::{ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ - core::ZenohId, common::ZExtBody, + core::ZenohId, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, interest::InterestId, From 8b17ba68b11e727b2de0cff15d954ba08bb18b82 Mon Sep 17 00:00:00 2001 From: Alexander Bushnev Date: Tue, 4 Jun 2024 17:01:23 +0200 Subject: [PATCH 366/598] feat: add default for SourceInfo --- zenoh/src/api/sample.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 117e9c5924..e1799871d1 100644 --- 
a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -209,6 +209,13 @@ impl From> for SourceInfo { } } +#[zenoh_macros::unstable] +impl Default for SourceInfo { + fn default() -> Self { + Self::empty() + } +} + /// The kind of a `Sample`. #[repr(u8)] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] From 5ddccbfe077009b8985c13735440be75784a263c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 4 Jun 2024 17:02:02 +0200 Subject: [PATCH 367/598] cargo fmt --- .../src/replica/storage.rs | 7 +++++-- zenoh/src/lib.rs | 18 +++++++++--------- zenoh/src/net/routing/hat/mod.rs | 3 +-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c99b322a12..8b63f353fb 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,10 +21,13 @@ use async_std::sync::{Arc, Mutex, RwLock}; use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; -use zenoh::internal::buffers::{SplitBuffer, ZBuf}; use zenoh::{ core::Result as ZResult, - internal::{bail, zenoh_home, Timed, TimedEvent, Timer}, + internal::{ + bail, + buffers::{SplitBuffer, ZBuf}, + zenoh_home, Timed, TimedEvent, Timer, + }, key_expr::{ keyexpr_tree::{ IKeyExprTree, IKeyExprTreeMut, KeBoxTree, KeyedSetProvider, NonWild, UnknownWildness, diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ccbaba6402..82cd866433 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -126,8 +126,6 @@ pub mod prelude; /// Zenoh core types pub mod core { - /// Zenoh message priority - pub use crate::api::publisher::Priority; #[allow(deprecated)] pub use zenoh_core::{AsyncResolve, SyncResolve}; pub use zenoh_core::{Resolvable, Resolve, Wait}; @@ -136,6 +134,9 @@ pub mod core { pub use zenoh_result::Error; /// A zenoh result. 
pub use zenoh_result::ZResult as Result; + + /// Zenoh message priority + pub use crate::api::publisher::Priority; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. @@ -177,10 +178,9 @@ pub mod key_expr { IKeyExprTree, IKeyExprTreeMut, KeBoxTree, }; } - pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; - #[zenoh_macros::unstable] pub use zenoh_keyexpr::SetIntersectionLevel; + pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration}; // keyexpr format macro support @@ -299,13 +299,12 @@ pub mod query { pub use crate::api::query::ReplyKeyExpr; #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; - pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply}; - pub use crate::api::queryable::Query; #[zenoh_macros::unstable] #[zenoh_macros::internal] pub use crate::api::queryable::ReplySample; - pub use crate::api::queryable::{ - ReplyBuilder, ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder, + pub use crate::api::{ + query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply}, + queryable::{Query, ReplyBuilder, ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder}, }; } @@ -324,9 +323,10 @@ pub mod handlers { /// Scouting primitives pub mod scouting { - pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; /// A zenoh Hello message. 
pub use zenoh_protocol::scouting::Hello; + + pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; } /// Liveliness primitives diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index c34a4c58ca..3039c91348 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -22,8 +22,7 @@ use std::{any::Any, collections::HashMap, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ - core::WireExpr, - core::ZenohId, + core::{WireExpr, ZenohId}, network::{ declare::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, From d44f3016f0e790ca65365313e9c84142ae922ea2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 4 Jun 2024 18:22:41 +0200 Subject: [PATCH 368/598] runtime and plugins in internal --- plugins/zenoh-plugin-example/src/lib.rs | 8 ++-- plugins/zenoh-plugin-rest/src/lib.rs | 19 +++++---- .../zenoh-plugin-storage-manager/src/lib.rs | 23 ++++++----- .../tests/operations.rs | 2 +- .../tests/wildcard.rs | 2 +- zenoh-ext/src/publication_cache.rs | 3 +- zenoh/src/api/loader.rs | 2 +- zenoh/src/lib.rs | 39 +++++++++---------- zenoh/src/net/routing/hat/mod.rs | 2 +- zenoh/tests/session.rs | 2 +- 10 files changed, 52 insertions(+), 50 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 00341909db..d6c58bed0b 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -26,9 +26,11 @@ use std::{ use futures::select; use tracing::{debug, info}; use zenoh::{ + internal::{ + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + }, key_expr::{keyexpr, KeyExpr}, - plugins::{RunningPluginTrait, ZenohPlugin}, - runtime::Runtime, sample::Sample, session::SessionDeclarations, }; @@ -50,7 +52,7 @@ const DEFAULT_SELECTOR: &str = "demo/example/**"; impl ZenohPlugin for ExamplePlugin {} impl Plugin for ExamplePlugin 
{ type StartArgs = Runtime; - type Instance = zenoh::plugins::RunningPlugin; + type Instance = zenoh::internal::plugins::RunningPlugin; // A mandatory const to define, in case of the plugin is built as a standalone executable const DEFAULT_NAME: &'static str = "example"; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 9ca97e385e..4f0ca3f67d 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -28,10 +28,12 @@ use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; use zenoh::{ bytes::{StringOrBase64, ZBytes}, encoding::Encoding, + internal::{ + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + }, key_expr::{keyexpr, KeyExpr}, - plugins::{RunningPluginTrait, ZenohPlugin}, query::{QueryConsolidation, Reply}, - runtime::Runtime, sample::{Sample, SampleKind, ValueBuilderTrait}, selector::{Selector, TIME_RANGE_KEY}, session::{Session, SessionDeclarations}, @@ -212,12 +214,15 @@ impl ZenohPlugin for RestPlugin {} impl Plugin for RestPlugin { type StartArgs = Runtime; - type Instance = zenoh::plugins::RunningPlugin; + type Instance = zenoh::internal::plugins::RunningPlugin; const DEFAULT_NAME: &'static str = "rest"; const PLUGIN_VERSION: &'static str = plugin_version!(); const PLUGIN_LONG_VERSION: &'static str = plugin_long_version!(); - fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { + fn start( + name: &str, + runtime: &Self::StartArgs, + ) -> ZResult { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
@@ -249,7 +254,7 @@ impl RunningPluginTrait for RunningPlugin { &'a self, selector: &'a Selector<'a>, plugin_status_key: &str, - ) -> ZResult> { + ) -> ZResult> { let mut responses = Vec::new(); let mut key = String::from(plugin_status_key); with_extended_string(&mut key, &["/version"], |key| { @@ -257,7 +262,7 @@ impl RunningPluginTrait for RunningPlugin { .unwrap() .intersects(selector.key_expr()) { - responses.push(zenoh::plugins::Response::new( + responses.push(zenoh::internal::plugins::Response::new( key.clone(), GIT_VERSION.into(), )) @@ -268,7 +273,7 @@ impl RunningPluginTrait for RunningPlugin { .unwrap() .intersects(selector.key_expr()) { - responses.push(zenoh::plugins::Response::new( + responses.push(zenoh::internal::plugins::Response::new( port_key.clone(), (&self.0).into(), )) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 027fd0af91..fef746daae 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -31,10 +31,12 @@ use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; use zenoh::{ core::Result as ZResult, - internal::{zlock, LibLoader}, + internal::{ + plugins::{Response, RunningPlugin, RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + zlock, LibLoader, + }, key_expr::keyexpr, - plugins::{RunningPluginTrait, ZenohPlugin}, - runtime::Runtime, selector::Selector, session::Session, }; @@ -65,7 +67,7 @@ impl Plugin for StoragesPlugin { const PLUGIN_LONG_VERSION: &'static str = plugin_long_version!(); type StartArgs = Runtime; - type Instance = zenoh::plugins::RunningPlugin; + type Instance = RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { zenoh_util::try_init_log_from_env(); @@ -305,7 +307,7 @@ impl RunningPluginTrait for StorageRuntime { &'a self, selector: &'a Selector<'a>, plugin_status_key: &str, - ) -> ZResult> { + ) -> ZResult> { let mut responses = Vec::new(); let mut 
key = String::from(plugin_status_key); // TODO: to be removed when "__version__" is implemented in admoin space @@ -314,7 +316,7 @@ impl RunningPluginTrait for StorageRuntime { .unwrap() .intersects(selector.key_expr()) { - responses.push(zenoh::plugins::Response::new( + responses.push(Response::new( key.clone(), StoragesPlugin::PLUGIN_VERSION.into(), )) @@ -329,17 +331,14 @@ impl RunningPluginTrait for StorageRuntime { .unwrap() .intersects(selector.key_expr()) { - responses.push(zenoh::plugins::Response::new( - key.clone(), - plugin.path().into(), - )) + responses.push(Response::new(key.clone(), plugin.path().into())) } }); if keyexpr::new(key.as_str()) .unwrap() .intersects(selector.key_expr()) { - responses.push(zenoh::plugins::Response::new( + responses.push(Response::new( key.clone(), plugin.instance().get_admin_status(), )) @@ -360,7 +359,7 @@ impl RunningPluginTrait for StorageRuntime { let _ = handle.send(StorageMessage::GetStatus(tx)); rx.recv().await }) { - responses.push(zenoh::plugins::Response::new(key.clone(), value)) + responses.push(Response::new(key.clone(), value)) } } }) diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 78d9cc3b24..c6c473d77b 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -71,7 +71,7 @@ async fn test_updates_in_order() { ) .unwrap(); - let runtime = zenoh::runtime::RuntimeBuilder::new(config) + let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() .await .unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 969db36c4f..9b29dba77c 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -72,7 +72,7 @@ async fn test_wild_card_in_order() { ) .unwrap(); - let runtime = 
zenoh::runtime::RuntimeBuilder::new(config) + let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() .await .unwrap(); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 510e806a3f..dc01367666 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,12 +20,11 @@ use std::{ use zenoh::{ core::{Error, Resolvable, Resolve, Result as ZResult}, - internal::{bail, ResolveFuture, TerminatableTask}, + internal::{bail, runtime::ZRuntime, ResolveFuture, TerminatableTask}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::Wait, query::Query, queryable::Queryable, - runtime::ZRuntime, sample::{Locality, Sample}, session::{SessionDeclarations, SessionRef}, subscriber::FlumeSubscriber, diff --git a/zenoh/src/api/loader.rs b/zenoh/src/api/loader.rs index fe2420cf01..175e0c6816 100644 --- a/zenoh/src/api/loader.rs +++ b/zenoh/src/api/loader.rs @@ -15,7 +15,7 @@ use zenoh_config::{Config, PluginLoad}; use zenoh_result::ZResult; use super::plugins::{PluginsManager, PLUGIN_PREFIX}; -use crate::runtime::Runtime; +use crate::net::runtime::Runtime; pub(crate) fn load_plugin( plugin_mgr: &mut PluginsManager, diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 82cd866433..afbb998b3a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -330,7 +330,7 @@ pub mod scouting { } /// Liveliness primitives -#[cfg(feature = "unstable")] +#[zenoh_macros::unstable] pub mod liveliness { pub use crate::api::liveliness::{ Liveliness, LivelinessGetBuilder, LivelinessSubscriberBuilder, LivelinessToken, @@ -345,15 +345,6 @@ pub mod time { pub use crate::api::time::new_reception_timestamp; } -/// Initialize a Session with an existing Runtime. -/// This operation is used by the plugins to share the same Runtime as the router. 
-#[doc(hidden)] -pub mod runtime { - pub use zenoh_runtime::ZRuntime; - - pub use crate::net::runtime::{AdminSpace, Runtime, RuntimeBuilder}; -} - /// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants pub mod config { // pub use zenoh_config::{ @@ -363,16 +354,7 @@ pub mod config { pub use zenoh_config::*; } -#[doc(hidden)] -#[cfg(all(feature = "unstable", feature = "plugins"))] -pub mod plugins { - pub use crate::api::plugins::{ - PluginsManager, Response, RunningPlugin, RunningPluginTrait, ZenohPlugin, PLUGIN_PREFIX, - }; -} - -#[doc(hidden)] -#[cfg(feature = "internal")] +#[zenoh_macros::internal] pub mod internal { pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout, ResolveFuture}; pub use zenoh_result::bail; @@ -381,7 +363,6 @@ pub mod internal { pub use zenoh_util::{zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; pub use crate::api::encoding::EncodingInternals; - /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { @@ -391,6 +372,22 @@ pub mod internal { ZBuf, ZBufReader, ZSlice, ZSliceBuffer, }; } + /// Initialize a Session with an existing Runtime. + /// This operation is used by the plugins to share the same Runtime as the router. 
+ #[zenoh_macros::unstable] + pub mod runtime { + pub use zenoh_runtime::ZRuntime; + + pub use crate::net::runtime::{AdminSpace, Runtime, RuntimeBuilder}; + } + /// Plugins support + #[zenoh_macros::unstable] + #[cfg(feature = "plugins")] + pub mod plugins { + pub use crate::api::plugins::{ + PluginsManager, Response, RunningPlugin, RunningPluginTrait, ZenohPlugin, PLUGIN_PREFIX, + }; + } } #[cfg(all(feature = "unstable", feature = "shared-memory"))] diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 3039c91348..49eeaa1d38 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -42,7 +42,7 @@ use super::{ }, router::RoutesIndexes, }; -use crate::{key_expr::KeyExpr, runtime::Runtime}; +use crate::{key_expr::KeyExpr, net::runtime::Runtime}; mod client; mod linkstate_peer; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 3c22d16b3f..4064cbc8ba 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -20,7 +20,7 @@ use std::{ }; #[cfg(feature = "unstable")] -use zenoh::runtime::{Runtime, RuntimeBuilder}; +use zenoh::internal::runtime::{Runtime, RuntimeBuilder}; use zenoh::{ config, internal::ztimeout, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, sample::SampleKind, subscriber::Reliability, Session, From 87ad54df1ac7377e9ba5e3c016926c3fb00d7f6b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 12:33:26 +0200 Subject: [PATCH 369/598] features reorg --- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 13 ++----- plugins/zenoh-backend-example/Cargo.toml | 6 ++-- plugins/zenoh-plugin-example/Cargo.toml | 13 ++++--- plugins/zenoh-plugin-rest/Cargo.toml | 9 +++-- .../zenoh-plugin-storage-manager/Cargo.toml | 9 +++-- zenoh/src/api/bytes.rs | 28 +++++++-------- zenoh/src/api/mod.rs | 4 +-- zenoh/src/api/queryable.rs | 5 --- zenoh/src/api/session.rs | 22 ++++-------- zenoh/src/lib.rs | 24 ++++++++++--- zenoh/src/net/runtime/adminspace.rs | 30 
++++++++-------- zenoh/src/net/runtime/mod.rs | 36 +++++++++---------- 12 files changed, 103 insertions(+), 96 deletions(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index b5632a6cc6..e81cea1663 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -12,11 +12,12 @@ // ZettaScale Zenoh Team, // +#[cfg(feature = "internal")] +use alloc::vec::Vec; use alloc::{ borrow::{Borrow, ToOwned}, format, string::String, - vec::Vec, }; use core::{ convert::{TryFrom, TryInto}, @@ -127,7 +128,6 @@ impl keyexpr { /// Returns `true` if `self` contains any wildcard character (`**` or `$*`). #[cfg(feature = "internal")] - #[cfg(feature = "unstable")] pub fn is_wild(&self) -> bool { self.is_wild_impl() } @@ -169,11 +169,7 @@ impl keyexpr { /// keyexpr::new("dem$*").unwrap().get_nonwild_prefix()); /// ``` #[cfg(feature = "internal")] - #[cfg(feature = "unstable")] pub fn get_nonwild_prefix(&self) -> Option<&keyexpr> { - self.get_nonwild_prefix_impl() - } - fn get_nonwild_prefix_impl(&self) -> Option<&keyexpr> { match self.0.find('*') { Some(i) => match self.0[..i].rfind('/') { Some(j) => unsafe { Some(keyexpr::from_str_unchecked(&self.0[..j])) }, @@ -238,11 +234,7 @@ impl keyexpr { /// ); /// ``` #[cfg(feature = "internal")] - #[cfg(feature = "unstable")] pub fn strip_prefix(&self, prefix: &Self) -> Vec<&keyexpr> { - self.strip_prefix_impl(prefix) - } - fn strip_prefix_impl(&self, prefix: &Self) -> Vec<&keyexpr> { let mut result = alloc::vec![]; 'chunks: for i in (0..=self.len()).rev() { if if i == self.len() { @@ -309,7 +301,6 @@ impl keyexpr { } #[cfg(feature = "internal")] - #[cfg(feature = "unstable")] pub const fn chunks(&self) -> Chunks { self.chunks_impl() } diff --git a/plugins/zenoh-backend-example/Cargo.toml b/plugins/zenoh-backend-example/Cargo.toml index 5ca4d3096b..e77ce51294 100644 --- a/plugins/zenoh-backend-example/Cargo.toml +++ 
b/plugins/zenoh-backend-example/Cargo.toml @@ -20,7 +20,7 @@ edition = { workspace = true } publish = false [features] -default = ["dynamic_plugin", "zenoh/default"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -33,9 +33,9 @@ async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } futures = { workspace = true } git-version = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true } +zenoh = { workspace = true, features = ["default"] } zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index 7cb0ad66f6..024c2fb6ef 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -20,7 +20,7 @@ edition = { workspace = true } publish = false [features] -default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -36,12 +36,17 @@ crate-type = ["cdylib"] [dependencies] async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } -zenoh-util = {workspace = true } +zenoh-util = { workspace = true } futures = { workspace = true } git-version = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } +zenoh = { workspace = true, features = [ + "default", + "plugins", + "internal", + "unstable", +] } zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 632b19a6f5..db1a0f747e 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -24,7 
+24,7 @@ categories = ["network-programming", "web-programming::http-server"] description = "The zenoh REST plugin" [features] -default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -46,7 +46,12 @@ schemars = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } tide = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } +zenoh = { workspace = true, features = [ + "plugins", + "default", + "internal", + "unstable", +] } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 6f72fc91e6..8fc530125e 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -24,7 +24,7 @@ categories = { workspace = true } description = "The zenoh storages plugin." 
[features] -default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -45,7 +45,12 @@ tracing = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } urlencoding = { workspace = true } -zenoh = { workspace = true, features = ["unstable", "internal"] } +zenoh = { workspace = true, features = [ + "default", + "plugins", + "internal", + "unstable", +] } zenoh-plugin-trait = { workspace = true } zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 446b2bc270..c4bcf6ae5e 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -28,7 +28,7 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] use zenoh_shm::{ api::buffer::{ zshm::{zshm, ZShm}, @@ -1569,7 +1569,7 @@ impl TryFrom<&mut ZBytes> for serde_pickle::Value { } // Shared memory conversion -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl Serialize for ZSerde { type Output = ZBytes; @@ -1579,7 +1579,7 @@ impl Serialize for ZSerde { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl From for ZBytes { fn from(t: ZShm) -> Self { ZSerde.serialize(t) @@ -1587,7 +1587,7 @@ impl From for ZBytes { } // Shared memory conversion -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl Serialize for ZSerde { type Output = ZBytes; @@ -1597,14 +1597,14 @@ impl Serialize for ZSerde { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl From for ZBytes { fn from(t: ZShmMut) -> Self 
{ ZSerde.serialize(t) } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl<'a> Deserialize<'a, &'a zshm> for ZSerde { type Input = &'a ZBytes; type Error = ZDeserializeError; @@ -1621,7 +1621,7 @@ impl<'a> Deserialize<'a, &'a zshm> for ZSerde { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl<'a> TryFrom<&'a ZBytes> for &'a zshm { type Error = ZDeserializeError; @@ -1630,7 +1630,7 @@ impl<'a> TryFrom<&'a ZBytes> for &'a zshm { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { type Error = ZDeserializeError; @@ -1639,7 +1639,7 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; @@ -1656,7 +1656,7 @@ impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; @@ -1673,7 +1673,7 @@ impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { } } -#[cfg(all(feature = "shared-memory", feature = "unstable"))] +#[cfg(feature = "shared-memory")] impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { type Error = ZDeserializeError; @@ -1875,10 +1875,10 @@ mod tests { use rand::Rng; use zenoh_buffers::{ZBuf, ZSlice}; - #[cfg(all(feature = "shared-memory", feature = "unstable"))] + #[cfg(feature = "shared-memory")] use zenoh_core::Wait; use zenoh_protocol::core::Properties; - #[cfg(all(feature = "shared-memory", feature = "unstable"))] + #[cfg(feature = "shared-memory")] use zenoh_shm::api::{ buffer::zshm::{zshm, ZShm}, protocol_implementations::posix::{ 
@@ -1992,7 +1992,7 @@ mod tests { basic(); // SHM - #[cfg(all(feature = "shared-memory", feature = "unstable"))] + #[cfg(feature = "shared-memory")] { // create an SHM backend... let backend = PosixSharedMemoryProviderBackend::builder() diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index 785f47817b..91ae6bed67 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -23,9 +23,9 @@ pub(crate) mod info; pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] pub(crate) mod loader; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] pub(crate) mod plugins; pub(crate) mod publisher; pub(crate) mod query; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 1959dc0d77..0249d99ca1 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -149,7 +149,6 @@ impl Query { /// replying on a disjoint key expression will result in an error when resolving the reply. /// This api is for internal use only. 
#[inline(always)] - #[zenoh_macros::unstable] #[zenoh_macros::internal] pub fn reply_sample(&self, sample: Sample) -> ReplySample<'_> { ReplySample { @@ -272,20 +271,17 @@ impl fmt::Display for Query { } } -#[zenoh_macros::unstable] #[zenoh_macros::internal] pub struct ReplySample<'a> { query: &'a Query, sample: Sample, } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl Resolvable for ReplySample<'_> { type To = ZResult<()>; } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl Wait for ReplySample<'_> { fn wait(self) -> ::To { @@ -293,7 +289,6 @@ impl Wait for ReplySample<'_> { } } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl IntoFuture for ReplySample<'_> { type Output = ::To; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 46dd37956e..015f6f9769 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -55,7 +55,7 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] +#[cfg(feature = "shared-memory")] use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; @@ -842,9 +842,7 @@ impl Session { #[allow(clippy::new_ret_no_self)] pub(super) fn new( config: Config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< - Arc, - >, + #[cfg(feature = "shared-memory")] shm_clients: Option>, ) -> impl Resolve> { ResolveFuture::new(async move { tracing::debug!("Config: {:?}", &config); @@ -852,7 +850,7 @@ impl Session { let aggregated_publishers = config.aggregation().publishers().clone(); #[allow(unused_mut)] // Required for shared-memory let mut runtime = RuntimeBuilder::new(config); - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] { runtime = runtime.shm_clients(shm_clients); } @@ -2714,7 +2712,7 @@ where { OpenBuilder { config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] 
shm_clients: None, } } @@ -2737,11 +2735,11 @@ where >::Error: std::fmt::Debug, { config: TryIntoConfig, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] shm_clients: Option>, } -#[cfg(all(feature = "unstable", feature = "shared-memory"))] +#[cfg(feature = "shared-memory")] impl OpenBuilder where TryIntoConfig: std::convert::TryInto + Send + 'static, @@ -2773,7 +2771,7 @@ where .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; Session::new( config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] self.shm_clients, ) .wait() @@ -2795,7 +2793,6 @@ where /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. -#[zenoh_macros::unstable] #[zenoh_macros::internal] pub fn init(runtime: Runtime) -> InitBuilder { InitBuilder { @@ -2808,7 +2805,6 @@ pub fn init(runtime: Runtime) -> InitBuilder { /// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[doc(hidden)] -#[zenoh_macros::unstable] #[zenoh_macros::internal] pub struct InitBuilder { runtime: Runtime, @@ -2816,7 +2812,6 @@ pub struct InitBuilder { aggregated_publishers: Vec, } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl InitBuilder { #[inline] @@ -2832,13 +2827,11 @@ impl InitBuilder { } } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl Resolvable for InitBuilder { type To = ZResult; } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl Wait for InitBuilder { fn wait(self) -> ::To { @@ -2851,7 +2844,6 @@ impl Wait for InitBuilder { } } -#[zenoh_macros::unstable] #[zenoh_macros::internal] impl IntoFuture for InitBuilder { type Output = ::To; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index afbb998b3a..4c953be831 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -196,7 +196,6 @@ pub mod key_expr { /// Zenoh [`Session`](crate::session::Session) and associated types pub mod session { - #[zenoh_macros::unstable] #[zenoh_macros::internal] pub use crate::api::session::{init, InitBuilder}; pub use crate::api::{ @@ -299,7 +298,6 @@ pub mod query { pub use crate::api::query::ReplyKeyExpr; #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; - #[zenoh_macros::unstable] #[zenoh_macros::internal] pub use crate::api::queryable::ReplySample; pub use crate::api::{ @@ -354,6 +352,18 @@ pub mod config { pub use zenoh_config::*; } +#[cfg(all(feature = "internal", not(feature = "unstable")))] +compile_error!( + "All internal functionality is unstable. The `unstable` feature must be enabled to use `internal`." +); +#[cfg(all( + feature = "plugins", + not(all(feature = "unstable", feature = "internal")) +))] +compile_error!( + "The plugins support is internal and unstable. The `unstable` and `internal` features must be enabled to use `plugins`." 
+); + #[zenoh_macros::internal] pub mod internal { pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout, ResolveFuture}; @@ -374,14 +384,13 @@ pub mod internal { } /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. - #[zenoh_macros::unstable] + #[zenoh_macros::internal] pub mod runtime { pub use zenoh_runtime::ZRuntime; pub use crate::net::runtime::{AdminSpace, Runtime, RuntimeBuilder}; } /// Plugins support - #[zenoh_macros::unstable] #[cfg(feature = "plugins")] pub mod plugins { pub use crate::api::plugins::{ @@ -390,7 +399,12 @@ pub mod internal { } } -#[cfg(all(feature = "unstable", feature = "shared-memory"))] +#[cfg(all(feature = "shared-memory", not(feature = "unstable")))] +compile_error!( + "The shared-memory support is unstable. The `unstable` feature must be enabled to use `shared-memory`." +); + +#[cfg(feature = "shared-memory")] pub mod shm { pub use zenoh_shm::api::{ buffer::{ diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index d62700b142..6b8ac52240 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -21,9 +21,9 @@ use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI, ZenohId}; use zenoh_core::Wait; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use zenoh_plugin_trait::{PluginControl, PluginStatus}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, ExprId, WireExpr, EMPTY_EXPR_ID}, @@ -40,7 +40,7 @@ use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; use super::{routing::dispatcher::face::Face, Runtime}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use 
crate::api::plugins::PluginsManager; use crate::{ api::{ @@ -71,7 +71,7 @@ pub struct AdminSpace { context: Arc, } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] #[derive(Debug, Clone)] enum PluginDiff { Delete(String), @@ -86,7 +86,7 @@ impl ConfigValidator for AdminSpace { current: &serde_json::Map, new: &serde_json::Map, ) -> ZResult>> { - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] { let plugins_mgr = self.context.runtime.plugins_manager(); let Some(plugin) = plugins_mgr.started_plugin(name) else { @@ -97,7 +97,7 @@ impl ConfigValidator for AdminSpace { }; plugin.instance().config_checker(path, current, new) } - #[cfg(not(all(feature = "unstable", feature = "plugins")))] + #[cfg(not(feature = "plugins"))] { let _ = (name, path, current, new); Ok(None) @@ -106,7 +106,7 @@ impl ConfigValidator for AdminSpace { } impl AdminSpace { - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] fn start_plugin( plugin_mgr: &mut PluginsManager, config: &zenoh_config::PluginLoad, @@ -195,7 +195,7 @@ impl AdminSpace { Arc::new(queryables_data), ); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] handlers.insert( format!("@/{whatami_str}/{zid_str}/plugins/**") .try_into() @@ -203,7 +203,7 @@ impl AdminSpace { Arc::new(plugins_data), ); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] handlers.insert( format!("@/{whatami_str}/{zid_str}/status/plugins/**") .try_into() @@ -211,7 +211,7 @@ impl AdminSpace { Arc::new(plugins_status), ); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] let mut active_plugins = runtime .plugins_manager() .started_plugins_iter() @@ -234,7 +234,7 @@ impl AdminSpace { config.set_plugin_validator(Arc::downgrade(&admin)); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] { let cfg_rx = 
admin.context.runtime.state.config.subscribe(); @@ -546,7 +546,7 @@ fn local_data(context: &AdminContext, query: Query) { let transport_mgr = context.runtime.manager().clone(); // plugins info - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] let plugins: serde_json::Value = { let plugins_mgr = context.runtime.plugins_manager(); plugins_mgr @@ -756,7 +756,7 @@ fn queryables_data(context: &AdminContext, query: Query) { } } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] fn plugins_data(context: &AdminContext, query: Query) { let guard = context.runtime.plugins_manager(); let root_key = format!( @@ -783,7 +783,7 @@ fn plugins_data(context: &AdminContext, query: Query) { } } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] fn plugins_status(context: &AdminContext, query: Query) { use crate::bytes::{Serialize, ZSerde}; @@ -854,7 +854,7 @@ fn plugins_status(context: &AdminContext, query: Query) { } } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] fn with_extended_string R>( prefix: &mut String, suffixes: &[&str], diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 82fd943f2b..d5b42ecdd2 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -20,7 +20,7 @@ mod adminspace; pub mod orchestrator; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use std::sync::{Mutex, MutexGuard}; use std::{ any::Any, @@ -44,9 +44,9 @@ use zenoh_protocol::{ network::NetworkMessage, }; use zenoh_result::{bail, ZResult}; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] +#[cfg(feature = "shared-memory")] use zenoh_shm::api::client_storage::SharedMemoryClientStorage; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] +#[cfg(feature = "shared-memory")] use zenoh_shm::reader::SharedMemoryReader; use zenoh_sync::get_mut_unchecked; use 
zenoh_task::TaskController; @@ -57,9 +57,9 @@ use zenoh_transport::{ use self::orchestrator::StartConditions; use super::{primitives::DeMux, routing, routing::router::Router}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use crate::api::loader::{load_plugins, start_plugins}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use crate::api::plugins::PluginsManager; use crate::{ config::{unwrap_or_default, Config, ModeDependent, Notifier}, @@ -78,7 +78,7 @@ pub(crate) struct RuntimeState { locators: std::sync::RwLock>, hlc: Option>, task_controller: TaskController, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: Mutex, start_conditions: Arc, } @@ -95,9 +95,9 @@ impl WeakRuntime { pub struct RuntimeBuilder { config: Config, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: Option, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] shm_clients: Option>, } @@ -105,20 +105,20 @@ impl RuntimeBuilder { pub fn new(config: Config) -> Self { Self { config, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: None, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] shm_clients: None, } } - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] pub fn plugins_manager>>(mut self, plugins_manager: T) -> Self { self.plugins_manager = plugins_manager.into(); self } - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] pub fn shm_clients(mut self, shm_clients: Option>) -> Self { self.shm_clients = shm_clients; self @@ -127,9 +127,9 @@ impl RuntimeBuilder { pub async fn build(self) -> ZResult { let RuntimeBuilder { config, - #[cfg(all(feature = "unstable", feature = "plugins"))] + 
#[cfg(feature = "plugins")] mut plugins_manager, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] + #[cfg(feature = "shared-memory")] shm_clients, } = self; @@ -166,7 +166,7 @@ impl RuntimeBuilder { let transport_manager = transport_manager.build(handler.clone())?; // Plugins manager - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] let plugins_manager = plugins_manager .take() .unwrap_or_else(|| load_plugins(&config)); @@ -187,7 +187,7 @@ impl RuntimeBuilder { locators: std::sync::RwLock::new(vec![]), hlc, task_controller: TaskController::default(), - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: Mutex::new(plugins_manager), start_conditions: Arc::new(StartConditions::default()), }), @@ -201,7 +201,7 @@ impl RuntimeBuilder { } // Start plugins - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] start_plugins(&runtime); // Start notifier task @@ -257,7 +257,7 @@ impl Runtime { &self.state.manager } - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] #[inline(always)] pub(crate) fn plugins_manager(&self) -> MutexGuard<'_, PluginsManager> { zlock!(self.state.plugins_manager) From f8f8dd5fb8a324eeafef6b0b1b3c9cc50ef7be38 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 12:47:47 +0200 Subject: [PATCH 370/598] clippy fixes, EncodingInternal removed --- .../src/unicast/establishment/accept.rs | 4 ++-- zenoh/src/api/encoding.rs | 21 +++++++------------ zenoh/src/api/session.rs | 4 ++-- zenoh/src/lib.rs | 1 - 4 files changed, 11 insertions(+), 19 deletions(-) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index d53374b5db..d074ea9642 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -696,7 +696,7 @@ pub(crate) async fn 
accept_link(link: LinkUnicast, manager: &TransportManager) - let iack_in = SendInitAckIn { mine_version: manager.config.version, - mine_zid: manager.config.zid.into(), + mine_zid: manager.config.zid, mine_whatami: manager.config.whatami, other_zid: isyn_out.other_zid, other_whatami: isyn_out.other_whatami, @@ -714,7 +714,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - // Create the OpenAck but not send it yet let oack_in = SendOpenAckIn { - mine_zid: manager.config.zid.into(), + mine_zid: manager.config.zid, mine_lease: manager.config.unicast.lease, other_zid: osyn_out.other_zid, }; diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index db5c28ed98..fcc5ae119a 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -836,24 +836,17 @@ impl EncodingMapping for serde_pickle::Value { const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT; } -pub trait EncodingInternals { - fn id(&self) -> u16; - - fn schema(&self) -> Option<&ZSlice>; - - fn new(id: u16, schema: Option) -> Self; -} - -impl EncodingInternals for Encoding { - fn id(&self) -> u16 { +impl Encoding { + #[zenoh_macros::internal] + pub fn id(&self) -> u16 { self.0.id } - - fn schema(&self) -> Option<&ZSlice> { + #[zenoh_macros::internal] + pub fn schema(&self) -> Option<&ZSlice> { self.0.schema.as_ref() } - - fn new(id: u16, schema: Option) -> Self { + #[zenoh_macros::internal] + pub fn new(id: u16, schema: Option) -> Self { Encoding(zenoh_protocol::core::Encoding { id, schema }) } } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 015f6f9769..99838e8c58 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2224,7 +2224,7 @@ impl Primitives for Session { }; let replier_id = match e.ext_sinfo { Some(info) => info.id.zid, - None => zenoh_protocol::core::ZenohId::rand().into(), + None => zenoh_protocol::core::ZenohId::rand(), }; let new_reply = Reply { replier_id, @@ -2351,7 +2351,7 
@@ impl Primitives for Session { ); let new_reply = Reply { result: Ok(sample), - replier_id: zenoh_protocol::core::ZenohId::rand().into(), // TODO + replier_id: zenoh_protocol::core::ZenohId::rand(), // TODO }; let callback = match query.reception_mode { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 4c953be831..67d9764f20 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -372,7 +372,6 @@ pub mod internal { pub use zenoh_task::{TaskController, TerminatableTask}; pub use zenoh_util::{zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; - pub use crate::api::encoding::EncodingInternals; /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { From 2f36e614e11792fd9f661f4b2861deb7a254eb38 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 13:01:35 +0200 Subject: [PATCH 371/598] relation_to unstable --- commons/zenoh-keyexpr/src/key_expr/borrowed.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index e81cea1663..2295156ab9 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -93,6 +93,7 @@ impl keyexpr { /// Returns the relation between `self` and `other` from `self`'s point of view ([`SetIntersectionLevel::Includes`] signifies that `self` includes `other`). /// /// Note that this is slower than [`keyexpr::intersects`] and [`keyexpr::includes`], so you should favor these methods for most applications. + #[cfg(feature = "unstable")] pub fn relation_to(&self, other: &Self) -> SetIntersectionLevel { use SetIntersectionLevel::*; if self.intersects(other) { @@ -563,6 +564,7 @@ impl Div for &keyexpr { /// /// You can check for intersection with `level >= SetIntersecionLevel::Intersection` and for inclusion with `level >= SetIntersectionLevel::Includes`. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg(feature = "unstable")] pub enum SetIntersectionLevel { Disjoint, Intersects, From efd9062d06be8e88663fa67dd5a8ff5a43c8847b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 13:22:23 +0200 Subject: [PATCH 372/598] attachment made stable --- zenoh/src/api/publisher.rs | 67 ++++++++++++-------------------------- zenoh/src/api/query.rs | 7 ---- zenoh/src/api/queryable.rs | 17 ---------- zenoh/src/api/sample.rs | 22 +++---------- zenoh/src/api/session.rs | 5 +-- 5 files changed, 26 insertions(+), 92 deletions(-) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 9287d7d508..c4cff83848 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -247,7 +247,6 @@ impl<'a> Publisher<'a> { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, } } @@ -272,7 +271,6 @@ impl<'a> Publisher<'a> { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, } } @@ -512,7 +510,6 @@ impl<'a> Sink for Publisher<'a> { payload, kind, encoding, - #[cfg(feature = "unstable")] attachment, .. 
} = item.into(); @@ -523,7 +520,6 @@ impl<'a> Sink for Publisher<'a> { None, #[cfg(feature = "unstable")] SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment, ) } @@ -547,7 +543,7 @@ impl Publisher<'_> { encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) -> ZResult<()> { tracing::trace!("write({:?}, [...])", &self.key_expr); let primitives = zread!(self.session.state) @@ -571,48 +567,28 @@ impl Publisher<'_> { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, payload: match kind { - SampleKind::Put => { - #[allow(unused_mut)] - let mut ext_attachment = None; + SampleKind::Put => PushBody::Put(Put { + timestamp, + encoding: encoding.clone().into(), #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); - } - } - PushBody::Put(Put { - timestamp, - encoding: encoding.clone().into(), - #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment, - ext_unknown: vec![], - payload: payload.clone().into(), - }) - } - SampleKind::Delete => { - #[allow(unused_mut)] - let mut ext_attachment = None; + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: attachment.clone().map(|a| a.into()), + ext_unknown: vec![], + payload: payload.clone().into(), + }), + SampleKind::Delete => PushBody::Del(Del { + timestamp, #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); - } - } - PushBody::Del(Del { - timestamp, - #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] - ext_sinfo: None, - ext_attachment, - ext_unknown: vec![], - }) - } + 
ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, + ext_attachment: attachment.clone().map(|a| a.into()), + ext_unknown: vec![], + }), }, }); } @@ -635,7 +611,6 @@ impl Publisher<'_> { &self.key_expr.to_wire(&self.session), Some(data_info), payload.into(), - #[cfg(feature = "unstable")] attachment, ); } diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d0ac0e0044..562069566b 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -157,7 +157,6 @@ pub struct SessionGetBuilder<'a, 'b, Handler> { pub(crate) timeout: Duration, pub(crate) handler: Handler, pub(crate) value: Option, - #[cfg(feature = "unstable")] pub(crate) attachment: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, @@ -173,7 +172,6 @@ impl SampleBuilderTrait for SessionGetBuilder<'_, '_, Handler> { } } - #[cfg(feature = "unstable")] fn attachment>(self, attachment: T) -> Self { let attachment: OptionZBytes = attachment.into(); Self { @@ -259,7 +257,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, - #[cfg(feature = "unstable")] attachment, #[cfg(feature = "unstable")] source_info, @@ -275,7 +272,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, - #[cfg(feature = "unstable")] attachment, #[cfg(feature = "unstable")] source_info, @@ -348,7 +344,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, - #[cfg(feature = "unstable")] attachment, #[cfg(feature = "unstable")] source_info, @@ -364,7 +359,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { destination, timeout, value, - #[cfg(feature = "unstable")] attachment, #[cfg(feature = "unstable")] source_info, @@ -462,7 +456,6 @@ where self.destination, self.timeout, self.value, - #[cfg(feature = "unstable")] self.attachment, #[cfg(feature = "unstable")] self.source_info, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs 
index 0249d99ca1..bb41a37c2f 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -74,7 +74,6 @@ pub struct Query { pub(crate) inner: Arc, pub(crate) eid: EntityId, pub(crate) value: Option, - #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -131,13 +130,11 @@ impl Query { } /// This Query's attachment. - #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } /// This Query's attachment. - #[zenoh_macros::unstable] pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { self.attachment.as_mut() } @@ -184,7 +181,6 @@ impl Query { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, } } @@ -224,7 +220,6 @@ impl Query { timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, } } @@ -316,11 +311,8 @@ pub struct ReplyBuilder<'a, 'b, T> { kind: T, timestamp: Option, qos: QoSBuilder, - #[cfg(feature = "unstable")] source_info: SourceInfo, - - #[cfg(feature = "unstable")] attachment: Option, } @@ -335,7 +327,6 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { - #[cfg(feature = "unstable")] fn attachment>(self, attachment: U) -> Self { let attachment: OptionZBytes = attachment.into(); Self { @@ -412,7 +403,6 @@ impl Wait for ReplyBuilder<'_, '_, ReplyBuilderPut> { .qos(self.qos.into()); #[cfg(feature = "unstable")] let sample = sample.source_info(self.source_info); - #[cfg(feature = "unstable")] let sample = sample.attachment(self.attachment); self.query._reply_sample(sample.into()) } @@ -426,7 +416,6 @@ impl Wait for ReplyBuilder<'_, '_, ReplyBuilderDelete> { .qos(self.qos.into()); #[cfg(feature = "unstable")] let sample = sample.source_info(self.source_info); - #[cfg(feature = "unstable")] let sample = sample.attachment(self.attachment); 
self.query._reply_sample(sample.into()) } @@ -463,20 +452,14 @@ impl Query { ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, - #[cfg(feature = "unstable")] ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, ext_unknown: vec![], payload: sample.payload.into(), }), SampleKind::Delete => ReplyBody::Del(Del { timestamp: sample.timestamp, ext_sinfo, - #[cfg(feature = "unstable")] ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, ext_unknown: vec![], }), }, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 117e9c5924..b3e164924b 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -62,7 +62,7 @@ pub(crate) trait DataInfoIntoSample { self, key_expr: IntoKeyExpr, payload: IntoZBytes, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -79,7 +79,7 @@ impl DataInfoIntoSample for DataInfo { self, key_expr: IntoKeyExpr, payload: IntoZBytes, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -97,7 +97,6 @@ impl DataInfoIntoSample for DataInfo { source_id: self.source_id, source_sn: self.source_sn, }, - #[cfg(feature = "unstable")] attachment, } } @@ -109,19 +108,14 @@ impl DataInfoIntoSample for Option { self, key_expr: IntoKeyExpr, payload: IntoZBytes, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) -> Sample where IntoKeyExpr: Into>, IntoZBytes: Into, { if let Some(data_info) = self { - data_info.into_sample( - key_expr, - payload, - #[cfg(feature = "unstable")] - attachment, - ) + data_info.into_sample(key_expr, payload, attachment) } else { Sample { key_expr: key_expr.into(), @@ -132,7 +126,6 @@ impl DataInfoIntoSample for Option { qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] 
attachment, } } @@ -252,7 +245,6 @@ pub struct SampleFields { pub congestion_control: CongestionControl, #[cfg(feature = "unstable")] pub source_info: SourceInfo, - #[cfg(feature = "unstable")] pub attachment: Option, } @@ -269,7 +261,6 @@ impl From for SampleFields { congestion_control: sample.qos.congestion_control(), #[cfg(feature = "unstable")] source_info: sample.source_info, - #[cfg(feature = "unstable")] attachment: sample.attachment, } } @@ -285,11 +276,8 @@ pub struct Sample { pub(crate) encoding: Encoding, pub(crate) timestamp: Option, pub(crate) qos: QoS, - #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, - - #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -353,14 +341,12 @@ impl Sample { } /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. - #[zenoh_macros::unstable] #[inline] pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
- #[zenoh_macros::unstable] #[inline] pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { self.attachment.as_mut() diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 99838e8c58..41c01d99ec 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -730,7 +730,6 @@ impl Session { encoding: Encoding::default(), }, timestamp: None, - #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), @@ -766,7 +765,6 @@ impl Session { publisher: self.declare_publisher(key_expr), kind: PublicationBuilderDelete, timestamp: None, - #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), @@ -818,7 +816,6 @@ impl Session { destination: Locality::default(), timeout, value: None, - #[cfg(feature = "unstable")] attachment: None, handler: DefaultHandler::default(), #[cfg(feature = "unstable")] @@ -1536,7 +1533,7 @@ impl Session { key_expr: &WireExpr, info: Option, payload: ZBuf, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) { let mut callbacks = SingleOrVec::default(); let state = zread!(self.state); From 0a04d867a1be0694c35987304d26b08167308ffb Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 13:27:23 +0200 Subject: [PATCH 373/598] selector::time_range unstable --- zenoh/src/api/selector.rs | 1 + zenoh/src/lib.rs | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 2dc77dc967..7477ea65e9 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -73,6 +73,7 @@ pub struct Selector<'a> { pub(crate) parameters: Parameters<'a>, } +#[zenoh_macros::unstable] pub const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { /// Builds a new selector diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 67d9764f20..34ca5a3c47 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -247,10 +247,12 @@ pub mod 
bytes { /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { + #[zenoh_macros::unstable] + pub use crate::api::selector::TIME_RANGE_KEY; + pub use crate::api::selector::{Parameters, Selector}; pub use zenoh_protocol::core::Properties; + #[zenoh_macros::unstable] pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - - pub use crate::api::selector::{Parameters, Selector, TIME_RANGE_KEY}; } /// Subscribing primitives From 234a4fdc9eccb6e18bf46e1798435dc04cef5ff6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 13:27:46 +0200 Subject: [PATCH 374/598] cargo fmt --- zenoh/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 34ca5a3c47..af7f42c885 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -247,12 +247,13 @@ pub mod bytes { /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - #[zenoh_macros::unstable] - pub use crate::api::selector::TIME_RANGE_KEY; - pub use crate::api::selector::{Parameters, Selector}; pub use zenoh_protocol::core::Properties; #[zenoh_macros::unstable] pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + + #[zenoh_macros::unstable] + pub use crate::api::selector::TIME_RANGE_KEY; + pub use crate::api::selector::{Parameters, Selector}; } /// Subscribing primitives From 8ca6a233c6f3f1181faa4b17f9fc5d230edfe37e Mon Sep 17 00:00:00 2001 From: Alexander Bushnev Date: Wed, 5 Jun 2024 13:33:31 +0200 Subject: [PATCH 375/598] fix: add Copy to EntityGlobalId --- commons/zenoh-protocol/src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 9f10cab391..844f6a3ad6 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -271,7 +271,7 @@ impl<'de> 
serde::Deserialize<'de> for ZenohId { pub type EntityId = u32; /// The global unique id of a zenoh entity. -#[derive(Debug, Default, Clone, Eq, Hash, PartialEq)] +#[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] pub struct EntityGlobalId { pub zid: ZenohId, pub eid: EntityId, From 4a02a125b6b9613c779db6871f0903b682c53a92 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 14:28:29 +0200 Subject: [PATCH 376/598] `now` added to session --- commons/zenoh-protocol/src/core/mod.rs | 6 ++++++ zenoh/src/api/session.rs | 13 +++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 844f6a3ad6..9c8eee58a1 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -202,6 +202,12 @@ impl From<&ZenohId> for uhlc::ID { } } +impl From for uhlc::ID { + fn from(zid: ZenohId) -> Self { + zid.0 + } +} + impl From for OwnedKeyExpr { fn from(zid: ZenohId) -> Self { // SAFETY: zid.to_string() returns an stringified hexadecimal diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a81835cadc..a317863a6f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -35,8 +35,8 @@ use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, ZenohId, - EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, Timestamp, TimestampId, + WireExpr, ZenohId, EMPTY_EXPR_ID, }, network::{ self, @@ -824,6 +824,15 @@ impl Session { source_info: SourceInfo::empty(), } } + /// Generates a reception [`Timestamp`] with id=0x01. + /// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) + /// that doesn't contain any timestamp. 
+ pub fn now(&self) -> Timestamp { + use std::time::{SystemTime, UNIX_EPOCH}; + + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + Timestamp::new(now.into(), self.zid().into()) + } } impl Session { From 2960447c0e7c2638083aeb662100bffb951db6f5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 14:31:32 +0200 Subject: [PATCH 377/598] removed now unnecessary clone (clippy warnings) --- zenoh/src/api/session.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 41c01d99ec..187ec27be7 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2156,7 +2156,7 @@ impl Primitives for Session { encoding: Some(m.encoding.into()), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), + source_id: m.ext_sinfo.as_ref().map(|i| i.id), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2174,7 +2174,7 @@ impl Primitives for Session { encoding: None, timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), + source_id: m.ext_sinfo.as_ref().map(|i| i.id), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2315,7 +2315,7 @@ impl Primitives for Session { encoding: Some(encoding.into()), timestamp, qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), + source_id: ext_sinfo.as_ref().map(|i| i.id), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, #[cfg(feature = "unstable")] @@ -2333,7 +2333,7 @@ impl Primitives for Session { encoding: None, timestamp, qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), + source_id: ext_sinfo.as_ref().map(|i| i.id), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, #[cfg(feature = "unstable")] From 744026b433b4dbb08f6eb342414fa618d571f9f1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 
5 Jun 2024 15:51:15 +0200 Subject: [PATCH 378/598] id parameter to timestamp added --- .../src/replica/mod.rs | 6 ++++-- .../src/replica/snapshotter.rs | 13 ++++++++++--- .../src/replica/storage.rs | 7 +++++-- zenoh-ext/src/querying_subscriber.rs | 5 +++-- zenoh/src/api/session.rs | 13 ++----------- zenoh/src/api/time.rs | 16 ++++++---------- zenoh/src/lib.rs | 2 +- 7 files changed, 31 insertions(+), 31 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 97bf86e764..0e4ffbd70a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -114,6 +114,9 @@ impl Replica { } }; + // Zid of session for generating timestamps + let zid = session.zid(); + let replica = Replica { name: name.to_string(), session, @@ -121,7 +124,6 @@ impl Replica { replica_config: storage_config.replica_config.clone().unwrap(), digests_published: RwLock::new(HashSet::new()), }; - // Create channels for communication between components // channel to queue digests to be aligned let (tx_digest, rx_digest) = flume::unbounded(); @@ -132,7 +134,7 @@ impl Replica { let config = replica.replica_config.clone(); // snapshotter - let snapshotter = Arc::new(Snapshotter::new(rx_log, &startup_entries, &config).await); + let snapshotter = Arc::new(Snapshotter::new(zid, rx_log, &startup_entries, &config).await); // digest sub let digest_sub = replica.start_digest_sub(tx_digest).fuse(); // queryable for alignment diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index d5708686ee..bfabe92cd3 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -24,12 +24,14 @@ use async_std::{ }; use flume::Receiver; use futures::join; -use zenoh::{key_expr::OwnedKeyExpr, 
time::Timestamp}; +use zenoh::{config::ZenohId, key_expr::OwnedKeyExpr, time::Timestamp}; use zenoh_backend_traits::config::ReplicaConfig; use super::{Digest, DigestConfig, LogEntry}; pub struct Snapshotter { + // session id for timestamp generation + id: ZenohId, // channel to get updates from the storage storage_update: Receiver<(OwnedKeyExpr, Timestamp)>, // configuration parameters of the replica @@ -55,6 +57,7 @@ pub struct ReplicationInfo { impl Snapshotter { // Initialize the snapshot parameters, logs and digest pub async fn new( + id: ZenohId, rx_sample: Receiver<(OwnedKeyExpr, Timestamp)>, initial_entries: &Vec<(OwnedKeyExpr, Timestamp)>, replica_config: &ReplicaConfig, @@ -63,10 +66,12 @@ impl Snapshotter { // from initial entries, populate the log - stable and volatile // compute digest let (last_snapshot_time, last_interval) = Snapshotter::compute_snapshot_params( + id, replica_config.propagation_delay, replica_config.delta, ); let snapshotter = Snapshotter { + id, storage_update: rx_sample, replica_config: replica_config.clone(), content: ReplicationInfo { @@ -126,6 +131,7 @@ impl Snapshotter { let mut last_snapshot_time = self.content.last_snapshot_time.write().await; let mut last_interval = self.content.last_interval.write().await; let (time, interval) = Snapshotter::compute_snapshot_params( + self.id, self.replica_config.propagation_delay, self.replica_config.delta, ); @@ -139,10 +145,11 @@ impl Snapshotter { // Compute latest snapshot time and latest interval with respect to the current time pub fn compute_snapshot_params( + id: ZenohId, propagation_delay: Duration, delta: Duration, ) -> (Timestamp, u64) { - let now = zenoh::time::new_reception_timestamp(); + let now = zenoh::time::new_timestamp(id); let latest_interval = (now .get_time() .to_system_time() @@ -199,7 +206,7 @@ impl Snapshotter { // Create digest from the stable log at startup async fn initialize_digest(&self) { - let now = zenoh::time::new_reception_timestamp(); + let now = 
zenoh::time::new_timestamp(self.id); let replica_data = &self.content; let log_locked = replica_data.stable_log.read().await; let latest_interval = replica_data.last_interval.read().await; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index bd7d56f7fc..8b1fc7e1c9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -35,7 +35,7 @@ use zenoh::{ sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait, ValueBuilderTrait}, selector::Selector, session::{Session, SessionDeclarations}, - time::{new_reception_timestamp, Timestamp, NTP64}, + time::{new_timestamp, Timestamp, NTP64}, value::Value, }; use zenoh_backend_traits::{ @@ -146,6 +146,9 @@ impl StorageService { ); t.add_async(gc).await; + // get session id for timestamp generation + let zid = self.session.info().zid().await; + // subscribe on key_expr let storage_sub = match self.session.declare_subscriber(&self.key_expr).await { Ok(storage_sub) => storage_sub, @@ -235,7 +238,7 @@ impl StorageService { continue; } }; - let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let timestamp = sample.timestamp().cloned().unwrap_or(new_timestamp(zid)); let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); self.process_sample(sample).await; }, diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4bcaca0565..54f3ff0224 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -31,7 +31,7 @@ use zenoh::{ selector::Selector, session::{SessionDeclarations, SessionRef}, subscriber::{Reliability, Subscriber}, - time::{new_reception_timestamp, Timestamp}, + time::{new_timestamp, Timestamp}, }; use crate::ExtractSample; @@ -655,6 +655,7 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { InputHandler: IntoHandler<'static, 
Sample, Handler = Handler> + Send, TryIntoSample: ExtractSample + Send + Sync, { + let zid = conf.session.zid(); let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), @@ -674,7 +675,7 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { ); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let timestamp = s.timestamp().cloned().unwrap_or(new_timestamp(zid)); state .merge_queue .push(SampleBuilder::from(s).timestamp(timestamp).into()); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a317863a6f..a81835cadc 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -35,8 +35,8 @@ use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, Timestamp, TimestampId, - WireExpr, ZenohId, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, ZenohId, + EMPTY_EXPR_ID, }, network::{ self, @@ -824,15 +824,6 @@ impl Session { source_info: SourceInfo::empty(), } } - /// Generates a reception [`Timestamp`] with id=0x01. - /// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) - /// that doesn't contain any timestamp. 
- pub fn now(&self) -> Timestamp { - use std::time::{SystemTime, UNIX_EPOCH}; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - Timestamp::new(now.into(), self.zid().into()) - } } impl Session { diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index cbdabe3a7e..2a2cc3dd4d 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -11,16 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryFrom; - +use std::time::{SystemTime, UNIX_EPOCH}; use zenoh_protocol::core::{Timestamp, TimestampId}; -/// Generates a reception [`Timestamp`] with id=0x01. -/// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) -/// that doesn't contain any timestamp. -pub fn new_reception_timestamp() -> Timestamp { - use std::time::{SystemTime, UNIX_EPOCH}; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) +/// Generates a [`Timestamp`] with [`TimestampId`] and current system time +/// The [`TimestampId`] can be taken from session id returned by [`SessionInfo::zid()`](crate::api::info::SessionInfo::zid). +pub fn new_timestamp>(id: T) -> Timestamp { + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); + Timestamp::new(now, id.into()) } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c0975c6f13..5467b44692 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -343,7 +343,7 @@ pub mod liveliness { pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; - pub use crate::api::time::new_reception_timestamp; + pub use crate::api::time::new_timestamp; } /// Initialize a Session with an existing Runtime. 
From c94f0f1436096a50a9fcae8fca72304f639f5386 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 15:53:23 +0200 Subject: [PATCH 379/598] cargo fmt --- zenoh/src/api/time.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index 2a2cc3dd4d..a617c2004c 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::time::{SystemTime, UNIX_EPOCH}; + use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a [`Timestamp`] with [`TimestampId`] and current system time From 726d3244e1de31ee895291aa99d1df1024f7f251 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 15:58:34 +0200 Subject: [PATCH 380/598] added doc(hidden) in keyepr crate --- commons/zenoh-keyexpr/src/key_expr/borrowed.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 2295156ab9..215d4e2d9e 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -129,6 +129,7 @@ impl keyexpr { /// Returns `true` if `self` contains any wildcard character (`**` or `$*`). 
#[cfg(feature = "internal")] + #[doc(hidden)] pub fn is_wild(&self) -> bool { self.is_wild_impl() } @@ -170,6 +171,7 @@ impl keyexpr { /// keyexpr::new("dem$*").unwrap().get_nonwild_prefix()); /// ``` #[cfg(feature = "internal")] + #[doc(hidden)] pub fn get_nonwild_prefix(&self) -> Option<&keyexpr> { match self.0.find('*') { Some(i) => match self.0[..i].rfind('/') { @@ -235,6 +237,7 @@ impl keyexpr { /// ); /// ``` #[cfg(feature = "internal")] + #[doc(hidden)] pub fn strip_prefix(&self, prefix: &Self) -> Vec<&keyexpr> { let mut result = alloc::vec![]; 'chunks: for i in (0..=self.len()).rev() { @@ -302,6 +305,7 @@ impl keyexpr { } #[cfg(feature = "internal")] + #[doc(hidden)] pub const fn chunks(&self) -> Chunks { self.chunks_impl() } From e6e8ff08e397816376c05197f6fa7abbb9fc0fb1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 22:55:10 +0200 Subject: [PATCH 381/598] Update commons/zenoh-macros/src/lib.rs Co-authored-by: Joseph Perez --- commons/zenoh-macros/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 3b3b14cd3d..c9e327364e 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -59,7 +59,7 @@ pub fn rustc_version_release(_tokens: TokenStream) -> TokenStream { (quote! {(#release, #commit)}).into() } -/// An enumeration of items which can be annotated with `#[zenoh_macros::unstable]`, #[zenoh_macros::unstable]`, `#[zenoh_macros::internal]` +/// An enumeration of items which can be annotated with `#[zenoh_macros::unstable_doc]`, #[zenoh_macros::unstable]`, `#[zenoh_macros::internal]` enum AnnotableItem { /// Wrapper around [`syn::Item`]. 
Item(Item), From 03d45be97b3b4f6394f706640e204adc7293c405 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 5 Jun 2024 23:01:04 +0200 Subject: [PATCH 382/598] sorted features --- zenoh/Cargo.toml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index bfb7e3525a..155a906852 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -31,6 +31,19 @@ maintenance = { status = "actively-developed" } [features] auth_pubkey = ["zenoh-transport/auth_pubkey"] auth_usrpwd = ["zenoh-transport/auth_usrpwd"] +default = [ + "auth_pubkey", + "auth_usrpwd", + "transport_multilink", + "transport_compression", + "transport_quic", + "transport_tcp", + "transport_tls", + "transport_udp", + "transport_unixsock-stream", + "transport_ws", +] +internal = ["zenoh-keyexpr/internal"] plugins = [] shared-memory = [ "zenoh-shm", @@ -51,19 +64,6 @@ transport_unixsock-stream = ["zenoh-transport/transport_unixsock-stream"] transport_ws = ["zenoh-transport/transport_ws"] transport_vsock = ["zenoh-transport/transport_vsock"] unstable = ["zenoh-keyexpr/unstable"] -internal = ["zenoh-keyexpr/internal"] -default = [ - "auth_pubkey", - "auth_usrpwd", - "transport_multilink", - "transport_compression", - "transport_quic", - "transport_tcp", - "transport_tls", - "transport_udp", - "transport_unixsock-stream", - "transport_ws", -] [dependencies] tokio = { workspace = true, features = ["rt", "macros", "time"] } From a0d9f98b9ef3a52a18647fa3c7dd0b3fdbf06aa5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 6 Jun 2024 01:06:31 +0200 Subject: [PATCH 383/598] Update zenoh/src/lib.rs doument "unstable" shared memory reexport Co-authored-by: Joseph Perez --- zenoh/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index af7f42c885..b8d78bceff 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -406,6 +406,7 @@ compile_error!( "The shared-memory support is unstable. 
The `unstable` feature must be enabled to use `shared-memory`." ); +#[zenoh_macros::unstable] #[cfg(feature = "shared-memory")] pub mod shm { pub use zenoh_shm::api::{ From ae17415b3c01623ed4422b9fd6d718f23113a7c2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 6 Jun 2024 13:32:18 +0200 Subject: [PATCH 384/598] Update zenoh/src/api/info.rs Co-authored-by: Joseph Perez --- zenoh/src/api/info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index f66ade913e..01de4a34d5 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -94,7 +94,7 @@ impl<'a> Wait for RoutersZenohIdBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Router).then_some(())) - .and_then(|_| s.get_zid().map(Into::::into).ok()) + .and_then(|_| s.get_zid().map(Into::into).ok()) }), ) } From 52416374b7a5afface2a4f84c1dad795c0f56389 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 6 Jun 2024 13:32:44 +0200 Subject: [PATCH 385/598] Update zenoh/src/api/info.rs Co-authored-by: Joseph Perez --- zenoh/src/api/info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 01de4a34d5..38e1fc6967 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -144,7 +144,7 @@ impl<'a> Wait for PeersZenohIdBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Peer).then_some(())) - .and_then(|_| s.get_zid().map(|zid| zid.into()).ok()) + .and_then(|_| s.get_zid().map(Into::into).ok()) }), ) } From 4eb41931d9ac4319c2e3354d5bfadc83b0fc41c5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 6 Jun 2024 17:02:32 +0200 Subject: [PATCH 386/598] fix after wrapping ZenohId in config --- Cargo.lock | 1 + commons/zenoh-config/Cargo.toml | 1 + commons/zenoh-config/src/lib.rs | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index aff6c4950a..2842003f2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-5186,6 +5186,7 @@ dependencies = [ "serde_json", "serde_yaml", "tracing", + "uhlc", "validated_struct", "zenoh-core", "zenoh-protocol", diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index cb502881d9..1b860fa7e0 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -37,3 +37,4 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } secrecy = { workspace = true } +uhlc = { workspace = true } \ No newline at end of file diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 2b141a0951..459d7be6f3 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -77,6 +77,12 @@ impl From for zenoh_protocol::core::ZenohId { } } +impl From for uhlc::ID { + fn from(zid: ZenohId) -> Self { + zid.0.into() + } +} + impl FromStr for ZenohId { type Err = zenoh_result::Error; From 2d778ee860362a288228f3b721bd105cda4433ea Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Fri, 7 Jun 2024 13:16:37 +0300 Subject: [PATCH 387/598] massive renaming for shm --- commons/zenoh-codec/src/core/shm.rs | 14 +- commons/zenoh-codec/tests/codec.rs | 6 +- commons/zenoh-config/src/defaults.rs | 2 +- commons/zenoh-config/src/lib.rs | 2 +- commons/zenoh-shm/src/api/buffer/traits.rs | 4 +- commons/zenoh-shm/src/api/buffer/zshm.rs | 32 +-- commons/zenoh-shm/src/api/buffer/zshmmut.rs | 32 +-- commons/zenoh-shm/src/api/client/mod.rs | 4 +- ...{shared_memory_client.rs => shm_client.rs} | 8 +- ...hared_memory_segment.rs => shm_segment.rs} | 4 +- .../zenoh-shm/src/api/client_storage/mod.rs | 74 ++++--- commons/zenoh-shm/src/api/common/types.rs | 4 +- .../api/protocol_implementations/posix/mod.rs | 6 +- ...d_memory_client.rs => posix_shm_client.rs} | 14 +- ...ckend.rs => posix_shm_provider_backend.rs} | 56 +++--- ...memory_segment.rs => posix_shm_segment.rs} | 8 +- .../posix/protocol_id.rs | 2 +- commons/zenoh-shm/src/api/provider/mod.rs 
| 4 +- ...red_memory_provider.rs => shm_provider.rs} | 184 +++++++++--------- ...der_backend.rs => shm_provider_backend.rs} | 2 +- commons/zenoh-shm/src/lib.rs | 51 +++-- commons/zenoh-shm/src/reader.rs | 24 +-- commons/zenoh-shm/tests/posix_shm_provider.rs | 33 ++-- examples/examples/z_alloc_shm.rs | 12 +- examples/examples/z_bytes_shm.rs | 10 +- examples/examples/z_get_shm.rs | 14 +- examples/examples/z_ping_shm.rs | 8 +- examples/examples/z_posix_shm_provider.rs | 9 +- examples/examples/z_pub_shm.rs | 11 +- examples/examples/z_pub_shm_thr.rs | 8 +- examples/examples/z_queryable_shm.rs | 14 +- examples/examples/z_sub_shm.rs | 4 +- io/zenoh-transport/src/manager.rs | 12 +- io/zenoh-transport/src/multicast/manager.rs | 4 +- io/zenoh-transport/src/shm.rs | 33 ++-- io/zenoh-transport/src/unicast/manager.rs | 8 +- io/zenoh-transport/tests/unicast_shm.rs | 22 +-- zenoh/src/api/bytes.rs | 17 +- zenoh/src/api/session.rs | 8 +- zenoh/src/lib.rs | 23 +-- zenoh/src/net/runtime/mod.rs | 10 +- zenoh/tests/bytes.rs | 8 +- zenoh/tests/shm.rs | 7 +- 43 files changed, 389 insertions(+), 423 deletions(-) rename commons/zenoh-shm/src/api/client/{shared_memory_client.rs => shm_client.rs} (70%) rename commons/zenoh-shm/src/api/client/{shared_memory_segment.rs => shm_segment.rs} (84%) rename commons/zenoh-shm/src/api/protocol_implementations/posix/{posix_shared_memory_client.rs => posix_shm_client.rs} (65%) rename commons/zenoh-shm/src/api/protocol_implementations/posix/{posix_shared_memory_provider_backend.rs => posix_shm_provider_backend.rs} (79%) rename commons/zenoh-shm/src/api/protocol_implementations/posix/{posix_shared_memory_segment.rs => posix_shm_segment.rs} (86%) rename commons/zenoh-shm/src/api/provider/{shared_memory_provider.rs => shm_provider.rs} (83%) rename commons/zenoh-shm/src/api/provider/{shared_memory_provider_backend.rs => shm_provider_backend.rs} (97%) diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 
e25496a268..4f272f0ed4 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -17,7 +17,7 @@ use zenoh_buffers::{ }; use zenoh_shm::{ api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, - watchdog::descriptor::Descriptor, SharedMemoryBufInfo, + watchdog::descriptor::Descriptor, ShmBufInfo, }; use crate::{RCodec, WCodec, Zenoh080}; @@ -62,14 +62,14 @@ where } } -impl WCodec<&SharedMemoryBufInfo, &mut W> for Zenoh080 +impl WCodec<&ShmBufInfo, &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &SharedMemoryBufInfo) -> Self::Output { - let SharedMemoryBufInfo { + fn write(self, writer: &mut W, x: &ShmBufInfo) -> Self::Output { + let ShmBufInfo { data_descriptor, shm_protocol, data_len, @@ -138,13 +138,13 @@ where } } -impl RCodec for Zenoh080 +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { let data_descriptor = self.read(&mut *reader)?; let shm_protocol = self.read(&mut *reader)?; let data_len = self.read(&mut *reader)?; @@ -152,7 +152,7 @@ where let header_descriptor = self.read(&mut *reader)?; let generation = self.read(&mut *reader)?; - let shm_info = SharedMemoryBufInfo::new( + let shm_info = ShmBufInfo::new( data_descriptor, shm_protocol, data_len, diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index c26b681336..c2cc71ea17 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -363,12 +363,12 @@ fn codec_encoding() { fn codec_shm_info() { use zenoh_shm::{ api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, - watchdog::descriptor::Descriptor, SharedMemoryBufInfo, + watchdog::descriptor::Descriptor, ShmBufInfo, }; - run!(SharedMemoryBufInfo, { + run!(ShmBufInfo, { let mut rng = rand::thread_rng(); - SharedMemoryBufInfo::new( + 
ShmBufInfo::new( ChunkDescriptor::new(rng.gen(), rng.gen(), rng.gen()), rng.gen(), rng.gen(), diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 1a1080cd49..9d593fabb1 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -216,7 +216,7 @@ impl Default for LinkRxConf { // Make explicit the value and ignore clippy warning #[allow(clippy::derivable_impls)] -impl Default for SharedMemoryConf { +impl Default for ShmConf { fn default() -> Self { Self { enabled: false } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 459d7be6f3..07112b2c5f 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -488,7 +488,7 @@ validated_struct::validator! { }, }, pub shared_memory: - SharedMemoryConf { + ShmConf { /// Whether shared memory is enabled or not. /// If set to `true`, the SHM buffer optimization support will be announced to other parties. (default `false`). 
/// This option doesn't make SHM buffer optimization mandatory, the real support depends on other party setting diff --git a/commons/zenoh-shm/src/api/buffer/traits.rs b/commons/zenoh-shm/src/api/buffer/traits.rs index 9104abc4a1..a5d6b9eba5 100644 --- a/commons/zenoh-shm/src/api/buffer/traits.rs +++ b/commons/zenoh-shm/src/api/buffer/traits.rs @@ -15,10 +15,10 @@ use std::ops::{Deref, DerefMut}; #[zenoh_macros::unstable_doc] -pub trait SHMBuf: Deref + AsRef<[u8]> { +pub trait ShmBuf: Deref + AsRef<[u8]> { #[zenoh_macros::unstable_doc] fn is_valid(&self) -> bool; } #[zenoh_macros::unstable_doc] -pub trait SHMBufMut: SHMBuf + DerefMut + AsMut<[u8]> {} +pub trait ShmBufMut: ShmBuf + DerefMut + AsMut<[u8]> {} diff --git a/commons/zenoh-shm/src/api/buffer/zshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs index d6f34f293a..23b902ac4c 100644 --- a/commons/zenoh-shm/src/api/buffer/zshm.rs +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -20,16 +20,16 @@ use std::{ use zenoh_buffers::{ZBuf, ZSlice}; -use super::{traits::SHMBuf, zshmmut::zshmmut}; -use crate::SharedMemoryBuf; +use super::{traits::ShmBuf, zshmmut::zshmmut}; +use crate::ShmBufInner; /// An immutable SHM buffer #[zenoh_macros::unstable_doc] #[repr(transparent)] #[derive(Clone, Debug, PartialEq, Eq)] -pub struct ZShm(pub(crate) SharedMemoryBuf); +pub struct ZShm(pub(crate) ShmBufInner); -impl SHMBuf for ZShm { +impl ShmBuf for ZShm { fn is_valid(&self) -> bool { self.0.is_valid() } @@ -44,7 +44,7 @@ impl PartialEq<&zshm> for ZShm { impl Borrow for ZShm { fn borrow(&self) -> &zshm { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } @@ -52,7 +52,7 @@ impl Borrow for ZShm { impl BorrowMut for ZShm { fn borrow_mut(&mut self) -> &mut zshm { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are 
#[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } @@ -71,8 +71,8 @@ impl AsRef<[u8]> for ZShm { } } -impl From for ZShm { - fn from(value: SharedMemoryBuf) -> Self { +impl From for ZShm { + fn from(value: ShmBufInner) -> Self { Self(value) } } @@ -96,7 +96,7 @@ impl TryFrom<&mut ZShm> for &mut zshmmut { match value.0.is_unique() && value.0.is_valid() { true => { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } false => Err(()), @@ -139,18 +139,18 @@ impl DerefMut for zshm { } } -impl From<&SharedMemoryBuf> for &zshm { - fn from(value: &SharedMemoryBuf) -> Self { +impl From<&ShmBufInner> for &zshm { + fn from(value: &ShmBufInner) -> Self { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl From<&mut SharedMemoryBuf> for &mut zshm { - fn from(value: &mut SharedMemoryBuf) -> Self { +impl From<&mut ShmBufInner> for &mut zshm { + fn from(value: &mut ShmBufInner) -> Self { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } @@ -162,7 +162,7 @@ impl TryFrom<&mut zshm> for &mut zshmmut { match value.0 .0.is_unique() && value.0 .0.is_valid() { true => { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to 
transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } false => Err(()), diff --git a/commons/zenoh-shm/src/api/buffer/zshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs index 7341b7600c..39a01dff74 100644 --- a/commons/zenoh-shm/src/api/buffer/zshmmut.rs +++ b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -18,27 +18,27 @@ use std::borrow::{Borrow, BorrowMut}; use zenoh_buffers::{ZBuf, ZSlice}; use super::{ - traits::{SHMBuf, SHMBufMut}, + traits::{ShmBuf, ShmBufMut}, zshm::{zshm, ZShm}, }; -use crate::SharedMemoryBuf; +use crate::ShmBufInner; /// A mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[repr(transparent)] -pub struct ZShmMut(SharedMemoryBuf); +pub struct ZShmMut(ShmBufInner); -impl SHMBuf for ZShmMut { +impl ShmBuf for ZShmMut { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl SHMBufMut for ZShmMut {} +impl ShmBufMut for ZShmMut {} impl ZShmMut { - pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { + pub(crate) unsafe fn new_unchecked(data: ShmBufInner) -> Self { Self(data) } } @@ -49,10 +49,10 @@ impl PartialEq for &ZShmMut { } } -impl TryFrom for ZShmMut { - type Error = SharedMemoryBuf; +impl TryFrom for ZShmMut { + type Error = ShmBufInner; - fn try_from(value: SharedMemoryBuf) -> Result { + fn try_from(value: ShmBufInner) -> Result { match value.is_unique() && value.is_valid() { true => Ok(Self(value)), false => Err(value), @@ -74,7 +74,7 @@ impl TryFrom for ZShmMut { impl Borrow for ZShmMut { fn borrow(&self) -> &zshm { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } @@ -82,7 +82,7 @@ impl Borrow for ZShmMut { impl BorrowMut for ZShmMut { fn borrow_mut(&mut self) -> &mut zshm 
{ // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } @@ -90,7 +90,7 @@ impl BorrowMut for ZShmMut { impl Borrow for ZShmMut { fn borrow(&self) -> &zshmmut { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } @@ -98,7 +98,7 @@ impl Borrow for ZShmMut { impl BorrowMut for ZShmMut { fn borrow_mut(&mut self) -> &mut zshmmut { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } @@ -174,13 +174,13 @@ impl DerefMut for zshmmut { } } -impl TryFrom<&mut SharedMemoryBuf> for &mut zshmmut { +impl TryFrom<&mut ShmBufInner> for &mut zshmmut { type Error = (); - fn try_from(value: &mut SharedMemoryBuf) -> Result { + fn try_from(value: &mut ShmBufInner) -> Result { match value.is_unique() && value.is_valid() { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] - // to SharedMemoryBuf type, so it is safe to transmute them in any direction + // to ShmBufInner type, so it is safe to transmute them in any direction true => Ok(unsafe { core::mem::transmute(value) }), false => Err(()), } diff --git a/commons/zenoh-shm/src/api/client/mod.rs b/commons/zenoh-shm/src/api/client/mod.rs index eab20733e7..4a147cbf67 100644 --- a/commons/zenoh-shm/src/api/client/mod.rs +++ b/commons/zenoh-shm/src/api/client/mod.rs @@ -12,5 +12,5 @@ // ZettaScale Zenoh Team, // -pub mod shared_memory_client; -pub mod shared_memory_segment; +pub mod shm_client; +pub mod 
shm_segment; diff --git a/commons/zenoh-shm/src/api/client/shared_memory_client.rs b/commons/zenoh-shm/src/api/client/shm_client.rs similarity index 70% rename from commons/zenoh-shm/src/api/client/shared_memory_client.rs rename to commons/zenoh-shm/src/api/client/shm_client.rs index dd3cf5db12..e25f818912 100644 --- a/commons/zenoh-shm/src/api/client/shared_memory_client.rs +++ b/commons/zenoh-shm/src/api/client/shm_client.rs @@ -16,13 +16,13 @@ use std::{fmt::Debug, sync::Arc}; use zenoh_result::ZResult; -use super::shared_memory_segment::SharedMemorySegment; +use super::shm_segment::ShmSegment; use crate::api::common::types::SegmentID; -/// SharedMemoryClient - client factory implementation for particular shared memory protocol +/// ShmClient - client factory implementation for particular shared memory protocol #[zenoh_macros::unstable_doc] -pub trait SharedMemoryClient: Debug + Send + Sync { +pub trait ShmClient: Debug + Send + Sync { /// Attach to particular shared memory segment #[zenoh_macros::unstable_doc] - fn attach(&self, segment: SegmentID) -> ZResult>; + fn attach(&self, segment: SegmentID) -> ZResult>; } diff --git a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs b/commons/zenoh-shm/src/api/client/shm_segment.rs similarity index 84% rename from commons/zenoh-shm/src/api/client/shared_memory_segment.rs rename to commons/zenoh-shm/src/api/client/shm_segment.rs index e3aaf9ba39..8744fbb765 100644 --- a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs +++ b/commons/zenoh-shm/src/api/client/shm_segment.rs @@ -18,9 +18,9 @@ use zenoh_result::ZResult; use crate::api::common::types::ChunkID; -/// SharedMemorySegment - RAII interface to interact with particular shared memory segment +/// ShmSegment - RAII interface to interact with particular shared memory segment #[zenoh_macros::unstable_doc] -pub trait SharedMemorySegment: Debug + Send + Sync { +pub trait ShmSegment: Debug + Send + Sync { /// Obtain the actual region of memory identified 
by it's id #[zenoh_macros::unstable_doc] fn map(&self, chunk: ChunkID) -> ZResult>; diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs index 7b78c23182..205bc3a9dc 100644 --- a/commons/zenoh-shm/src/api/client_storage/mod.rs +++ b/commons/zenoh-shm/src/api/client_storage/mod.rs @@ -22,12 +22,10 @@ use zenoh_result::{bail, ZResult}; use crate::{ api::{ - client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }, + client::{shm_client::ShmClient, shm_segment::ShmSegment}, common::types::ProtocolID, protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, + posix_shm_client::PosixShmClient, protocol_id::POSIX_PROTOCOL_ID, }, }, reader::{ClientStorage, GlobalDataSegmentID}, @@ -36,10 +34,10 @@ use crate::{ lazy_static! { /// A global lazily-initialized SHM client storage. /// When initialized, contains default client set, - /// see SharedMemoryClientStorage::with_default_client_set + /// see ShmClientStorage::with_default_client_set #[zenoh_macros::unstable_doc] - pub static ref GLOBAL_CLIENT_STORAGE: Arc = Arc::new( - SharedMemoryClientStorage::builder() + pub static ref GLOBAL_CLIENT_STORAGE: Arc = Arc::new( + ShmClientStorage::builder() .with_default_client_set() .build() ); @@ -47,64 +45,60 @@ lazy_static! 
{ /// Builder to create new client storages #[zenoh_macros::unstable_doc] -pub struct SharedMemoryClientSetBuilder; +pub struct ShmClientSetBuilder; -impl SharedMemoryClientSetBuilder { +impl ShmClientSetBuilder { /// Add client to the storage (without including the default client set) #[zenoh_macros::unstable_doc] pub fn with_client( self, id: ProtocolID, - client: Arc, - ) -> SharedMemoryClientStorageBuilder { + client: Arc, + ) -> ShmClientStorageBuilder { let clients = HashMap::from([(id, client)]); - SharedMemoryClientStorageBuilder::new(clients) + ShmClientStorageBuilder::new(clients) } /// Add list of clients to the storage (without including the default client set) #[zenoh_macros::unstable_doc] pub fn with_clients( self, - clients: &[(ProtocolID, Arc)], - ) -> SharedMemoryClientStorageBuilder { + clients: &[(ProtocolID, Arc)], + ) -> ShmClientStorageBuilder { let clients = clients.iter().cloned().collect(); - SharedMemoryClientStorageBuilder::new(clients) + ShmClientStorageBuilder::new(clients) } /// Include default clients #[zenoh_macros::unstable_doc] - pub fn with_default_client_set(self) -> SharedMemoryClientStorageBuilder { + pub fn with_default_client_set(self) -> ShmClientStorageBuilder { let clients = HashMap::from([( POSIX_PROTOCOL_ID, - Arc::new(PosixSharedMemoryClient {}) as Arc, + Arc::new(PosixShmClient {}) as Arc, )]); - SharedMemoryClientStorageBuilder::new(clients) + ShmClientStorageBuilder::new(clients) } } #[zenoh_macros::unstable_doc] -pub struct SharedMemoryClientStorageBuilder { - clients: HashMap>, +pub struct ShmClientStorageBuilder { + clients: HashMap>, } -impl SharedMemoryClientStorageBuilder { - fn new(clients: HashMap>) -> Self { +impl ShmClientStorageBuilder { + fn new(clients: HashMap>) -> Self { Self { clients } } /// Add client to the storage #[zenoh_macros::unstable_doc] - pub fn with_client( - mut self, - id: ProtocolID, - client: Arc, - ) -> ZResult { + pub fn with_client(mut self, id: ProtocolID, client: Arc) -> ZResult { 
match self.clients.entry(id) { std::collections::hash_map::Entry::Occupied(occupied) => { bail!("Client already exists for id {id}: {:?}!", occupied) } std::collections::hash_map::Entry::Vacant(vacant) => { - vacant.insert(client as Arc); + vacant.insert(client as Arc); Ok(self) } } @@ -112,15 +106,15 @@ impl SharedMemoryClientStorageBuilder { /// Add list of clients to the storage #[zenoh_macros::unstable_doc] - pub fn with_clients(mut self, clients: &[(ProtocolID, Arc)]) -> Self { + pub fn with_clients(mut self, clients: &[(ProtocolID, Arc)]) -> Self { self.clients.extend(clients.iter().cloned()); self } /// Build the storage with parameters specified on previous step #[zenoh_macros::unstable_doc] - pub fn build(self) -> SharedMemoryClientStorage { - SharedMemoryClientStorage::new(self.clients) + pub fn build(self) -> ShmClientStorage { + ShmClientStorage::new(self.clients) } } @@ -129,24 +123,24 @@ impl SharedMemoryClientStorageBuilder { /// SHM buffers for Protocols added to this instance. #[zenoh_macros::unstable_doc] #[derive(Debug)] -pub struct SharedMemoryClientStorage { - pub(crate) clients: ClientStorage>, - pub(crate) segments: RwLock>>, +pub struct ShmClientStorage { + pub(crate) clients: ClientStorage>, + pub(crate) segments: RwLock>>, } -impl Eq for SharedMemoryClientStorage {} +impl Eq for ShmClientStorage {} -impl PartialEq for SharedMemoryClientStorage { +impl PartialEq for ShmClientStorage { fn eq(&self, other: &Self) -> bool { std::ptr::eq(self, other) } } -impl SharedMemoryClientStorage { +impl ShmClientStorage { /// Get the builder to construct a new storage #[zenoh_macros::unstable_doc] - pub fn builder() -> SharedMemoryClientSetBuilder { - SharedMemoryClientSetBuilder + pub fn builder() -> ShmClientSetBuilder { + ShmClientSetBuilder } /// Get the list of supported SHM protocols. 
@@ -155,7 +149,7 @@ impl SharedMemoryClientStorage { self.clients.get_clients().keys().copied().collect() } - fn new(clients: HashMap>) -> Self { + fn new(clients: HashMap>) -> Self { Self { clients: ClientStorage::new(clients), segments: RwLock::default(), diff --git a/commons/zenoh-shm/src/api/common/types.rs b/commons/zenoh-shm/src/api/common/types.rs index 02e009aff3..5f423e7459 100644 --- a/commons/zenoh-shm/src/api/common/types.rs +++ b/commons/zenoh-shm/src/api/common/types.rs @@ -13,8 +13,8 @@ // /// Unique protocol identifier. -/// Here is a contract: it is up to user to make sure that incompatible SharedMemoryClient -/// and SharedMemoryProviderBackend implementations will never use the same ProtocolID +/// Here is a contract: it is up to user to make sure that incompatible ShmClient +/// and ShmProviderBackend implementations will never use the same ProtocolID #[zenoh_macros::unstable_doc] pub type ProtocolID = u32; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs index 12c8aba0b6..e5dd7db33e 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // -pub mod posix_shared_memory_client; -pub mod posix_shared_memory_provider_backend; +pub mod posix_shm_client; +pub mod posix_shm_provider_backend; pub mod protocol_id; -pub(crate) mod posix_shared_memory_segment; +pub(crate) mod posix_shm_segment; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_client.rs similarity index 65% rename from commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs rename to commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_client.rs index 5684b0b15f..73e2a96cd9 100644 --- 
a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_client.rs @@ -16,23 +16,21 @@ use std::sync::Arc; use zenoh_result::ZResult; -use super::posix_shared_memory_segment::PosixSharedMemorySegment; +use super::posix_shm_segment::PosixShmSegment; use crate::api::{ - client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }, + client::{shm_client::ShmClient, shm_segment::ShmSegment}, common::types::SegmentID, }; /// Client factory implementation for particular shared memory protocol #[zenoh_macros::unstable_doc] #[derive(Debug)] -pub struct PosixSharedMemoryClient; +pub struct PosixShmClient; -impl SharedMemoryClient for PosixSharedMemoryClient { +impl ShmClient for PosixShmClient { /// Attach to particular shared memory segment #[zenoh_macros::unstable_doc] - fn attach(&self, segment: SegmentID) -> ZResult> { - Ok(Arc::new(PosixSharedMemorySegment::open(segment)?)) + fn attach(&self, segment: SegmentID) -> ZResult> { + Ok(Arc::new(PosixShmSegment::open(segment)?)) } } diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs similarity index 79% rename from commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs rename to commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs index 60e2a10891..7de9e9f22d 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs @@ -25,12 +25,12 @@ use std::{ use zenoh_core::zlock; use zenoh_result::ZResult; -use super::posix_shared_memory_segment::PosixSharedMemorySegment; +use super::posix_shm_segment::PosixShmSegment; use 
crate::api::{ common::types::ChunkID, provider::{ chunk::{AllocatedChunk, ChunkDescriptor}, - shared_memory_provider_backend::SharedMemoryProviderBackend, + shm_provider_backend::ShmProviderBackend, types::{AllocAlignment, ChunkAllocResult, MemoryLayout, ZAllocError}, }, }; @@ -68,16 +68,16 @@ impl PartialEq for Chunk { /// Builder to create posix SHM provider #[zenoh_macros::unstable_doc] -pub struct PosixSharedMemoryProviderBackendBuilder; +pub struct PosixShmProviderBackendBuilder; -impl PosixSharedMemoryProviderBackendBuilder { +impl PosixShmProviderBackendBuilder { /// Use existing layout #[zenoh_macros::unstable_doc] pub fn with_layout>( self, layout: Layout, - ) -> LayoutedPosixSharedMemoryProviderBackendBuilder { - LayoutedPosixSharedMemoryProviderBackendBuilder { layout } + ) -> LayoutedPosixShmProviderBackendBuilder { + LayoutedPosixShmProviderBackendBuilder { layout } } /// Construct layout in-place using arguments @@ -86,9 +86,9 @@ impl PosixSharedMemoryProviderBackendBuilder { self, size: usize, alignment: AllocAlignment, - ) -> ZResult> { + ) -> ZResult> { let layout = MemoryLayout::new(size, alignment)?; - Ok(LayoutedPosixSharedMemoryProviderBackendBuilder { layout }) + Ok(LayoutedPosixShmProviderBackendBuilder { layout }) } /// Construct layout in-place from size (default alignment will be used) @@ -96,44 +96,44 @@ impl PosixSharedMemoryProviderBackendBuilder { pub fn with_size( self, size: usize, - ) -> ZResult> { + ) -> ZResult> { let layout = MemoryLayout::new(size, AllocAlignment::default())?; - Ok(LayoutedPosixSharedMemoryProviderBackendBuilder { layout }) + Ok(LayoutedPosixShmProviderBackendBuilder { layout }) } } #[zenoh_macros::unstable_doc] -pub struct LayoutedPosixSharedMemoryProviderBackendBuilder> { +pub struct LayoutedPosixShmProviderBackendBuilder> { layout: Layout, } -impl> LayoutedPosixSharedMemoryProviderBackendBuilder { - /// try to create PosixSharedMemoryProviderBackend +impl> LayoutedPosixShmProviderBackendBuilder { + /// try to 
create PosixShmProviderBackend #[zenoh_macros::unstable_doc] - pub fn res(self) -> ZResult { - PosixSharedMemoryProviderBackend::new(self.layout.borrow()) + pub fn res(self) -> ZResult { + PosixShmProviderBackend::new(self.layout.borrow()) } } -/// A backend for SharedMemoryProvider based on POSIX shared memory. +/// A backend for ShmProvider based on POSIX shared memory. /// This is the default general-purpose backed shipped with Zenoh. #[zenoh_macros::unstable_doc] -pub struct PosixSharedMemoryProviderBackend { +pub struct PosixShmProviderBackend { available: AtomicUsize, - segment: PosixSharedMemorySegment, + segment: PosixShmSegment, free_list: Mutex>, alignment: AllocAlignment, } -impl PosixSharedMemoryProviderBackend { +impl PosixShmProviderBackend { /// Get the builder to construct a new instance #[zenoh_macros::unstable_doc] - pub fn builder() -> PosixSharedMemoryProviderBackendBuilder { - PosixSharedMemoryProviderBackendBuilder + pub fn builder() -> PosixShmProviderBackendBuilder { + PosixShmProviderBackendBuilder } fn new(layout: &MemoryLayout) -> ZResult { - let segment = PosixSharedMemorySegment::create(layout.size())?; + let segment = PosixShmSegment::create(layout.size())?; let mut free_list = BinaryHeap::new(); let root_chunk = Chunk { @@ -143,7 +143,7 @@ impl PosixSharedMemoryProviderBackend { free_list.push(root_chunk); tracing::trace!( - "Created PosixSharedMemoryProviderBackend id {}, layout {:?}", + "Created PosixShmProviderBackend id {}, layout {:?}", segment.segment.id(), layout ); @@ -157,14 +157,14 @@ impl PosixSharedMemoryProviderBackend { } } -impl SharedMemoryProviderBackend for PosixSharedMemoryProviderBackend { +impl ShmProviderBackend for PosixShmProviderBackend { fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult { - tracing::trace!("PosixSharedMemoryProviderBackend::alloc({:?})", layout); + tracing::trace!("PosixShmProviderBackend::alloc({:?})", layout); let required_len = layout.size(); if 
self.available.load(Ordering::Relaxed) < required_len { - tracing::trace!( "PosixSharedMemoryProviderBackend does not have sufficient free memory to allocate {:?}, try de-fragmenting!", layout); + tracing::trace!( "PosixShmProviderBackend does not have sufficient free memory to allocate {:?}, try de-fragmenting!", layout); return Err(ZAllocError::OutOfMemory); } @@ -196,13 +196,13 @@ impl SharedMemoryProviderBackend for PosixSharedMemoryProviderBackend { }) } Some(c) => { - tracing::trace!("PosixSharedMemoryProviderBackend::alloc({:?}) cannot find any big enough chunk\nSharedMemoryManager::free_list = {:?}", layout, self.free_list); + tracing::trace!("PosixShmProviderBackend::alloc({:?}) cannot find any big enough chunk\nShmManager::free_list = {:?}", layout, self.free_list); guard.push(c); Err(ZAllocError::NeedDefragment) } None => { // NOTE: that should never happen! If this happens - there is a critical bug somewhere around! - let err = format!("PosixSharedMemoryProviderBackend::alloc({:?}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", layout, self.free_list); + let err = format!("PosixShmProviderBackend::alloc({:?}) cannot find any available chunk\nShmManager::free_list = {:?}", layout, self.free_list); #[cfg(feature = "test")] panic!("{err}"); #[cfg(not(feature = "test"))] diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs similarity index 86% rename from commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs rename to commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs index 3f74594ad0..dd103462e4 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs @@ -18,7 +18,7 @@ use zenoh_result::ZResult; use crate::{ api::{ - 
client::shared_memory_segment::SharedMemorySegment, + client::shm_segment::ShmSegment, common::types::{ChunkID, SegmentID}, }, posix_shm::array::ArrayInSHM, @@ -27,11 +27,11 @@ use crate::{ const POSIX_SHM_SEGMENT_PREFIX: &str = "posix_shm_provider_segment"; #[derive(Debug)] -pub(crate) struct PosixSharedMemorySegment { +pub(crate) struct PosixShmSegment { pub(crate) segment: ArrayInSHM, } -impl PosixSharedMemorySegment { +impl PosixShmSegment { pub(crate) fn create(alloc_size: usize) -> ZResult { let segment = ArrayInSHM::create(alloc_size, POSIX_SHM_SEGMENT_PREFIX)?; Ok(Self { segment }) @@ -43,7 +43,7 @@ impl PosixSharedMemorySegment { } } -impl SharedMemorySegment for PosixSharedMemorySegment { +impl ShmSegment for PosixShmSegment { fn map(&self, chunk: ChunkID) -> ZResult> { unsafe { Ok(AtomicPtr::new(self.segment.elem_mut(chunk))) } } diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs index b2eec8d7a5..cff39f921a 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs @@ -14,6 +14,6 @@ use crate::api::common::types::ProtocolID; -/// Protocol identifier to use when creating SharedMemoryProvider +/// Protocol identifier to use when creating ShmProvider #[zenoh_macros::unstable_doc] pub const POSIX_PROTOCOL_ID: ProtocolID = 0; diff --git a/commons/zenoh-shm/src/api/provider/mod.rs b/commons/zenoh-shm/src/api/provider/mod.rs index a769baacb3..2d25e37c3d 100644 --- a/commons/zenoh-shm/src/api/provider/mod.rs +++ b/commons/zenoh-shm/src/api/provider/mod.rs @@ -13,6 +13,6 @@ // pub mod chunk; -pub mod shared_memory_provider; -pub mod shared_memory_provider_backend; +pub mod shm_provider; +pub mod shm_provider_backend; pub mod types; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs 
b/commons/zenoh-shm/src/api/provider/shm_provider.rs similarity index 83% rename from commons/zenoh-shm/src/api/provider/shared_memory_provider.rs rename to commons/zenoh-shm/src/api/provider/shm_provider.rs index 9c0c497044..8773498b61 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shm_provider.rs @@ -27,7 +27,7 @@ use zenoh_result::ZResult; use super::{ chunk::{AllocatedChunk, ChunkDescriptor}, - shared_memory_provider_backend::SharedMemoryProviderBackend, + shm_provider_backend::ShmProviderBackend, types::{ AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, @@ -46,7 +46,7 @@ use crate::{ storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR, }, - SharedMemoryBuf, SharedMemoryBufInfo, + ShmBufInfo, ShmBufInner, }; #[derive(Debug)] @@ -73,25 +73,25 @@ impl BusyChunk { struct AllocData<'a, IDSource, Backend> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { size: usize, alignment: AllocAlignment, - provider: &'a SharedMemoryProvider, + provider: &'a ShmProvider, } #[zenoh_macros::unstable_doc] pub struct AllocLayoutSizedBuilder<'a, IDSource, Backend>(AllocData<'a, IDSource, Backend>) where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend; + Backend: ShmProviderBackend; impl<'a, IDSource, Backend> AllocLayoutSizedBuilder<'a, IDSource, Backend> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { - fn new(provider: &'a SharedMemoryProvider, size: usize) -> Self { + fn new(provider: &'a ShmProvider, size: usize) -> Self { Self(AllocData { provider, size, @@ -129,7 +129,7 @@ where impl<'a, IDSource, Backend> Resolvable for AllocLayoutSizedBuilder<'a, IDSource, Backend> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { type To = 
BufLayoutAllocResult; } @@ -138,7 +138,7 @@ where impl<'a, IDSource, Backend> Wait for AllocLayoutSizedBuilder<'a, IDSource, Backend> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { fn wait(self) -> ::To { let builder = AllocBuilder2::<'a, IDSource, Backend, JustAlloc> { @@ -151,23 +151,23 @@ where /// A layout for allocations. /// This is a pre-calculated layout suitable for making series of similar allocations -/// adopted for particular SharedMemoryProvider +/// adopted for particular ShmProvider #[zenoh_macros::unstable_doc] #[derive(Debug)] pub struct AllocLayout<'a, IDSource, Backend> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { size: usize, provider_layout: MemoryLayout, - provider: &'a SharedMemoryProvider, + provider: &'a ShmProvider, } impl<'a, IDSource, Backend> AllocLayout<'a, IDSource, Backend> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { /// Allocate the new buffer with this layout #[zenoh_macros::unstable_doc] @@ -204,8 +204,8 @@ where /// Trait for deallocation policies. 
#[zenoh_macros::unstable_doc] pub trait ForceDeallocPolicy { - fn dealloc( - provider: &SharedMemoryProvider, + fn dealloc( + provider: &ShmProvider, ) -> bool; } @@ -213,8 +213,8 @@ pub trait ForceDeallocPolicy { #[zenoh_macros::unstable_doc] pub struct DeallocOptimal; impl ForceDeallocPolicy for DeallocOptimal { - fn dealloc( - provider: &SharedMemoryProvider, + fn dealloc( + provider: &ShmProvider, ) -> bool { let mut guard = provider.busy_list.lock().unwrap(); let chunk_to_dealloc = match guard.remove(1) { @@ -235,8 +235,8 @@ impl ForceDeallocPolicy for DeallocOptimal { #[zenoh_macros::unstable_doc] pub struct DeallocYoungest; impl ForceDeallocPolicy for DeallocYoungest { - fn dealloc( - provider: &SharedMemoryProvider, + fn dealloc( + provider: &ShmProvider, ) -> bool { match provider.busy_list.lock().unwrap().pop_back() { Some(val) => { @@ -252,8 +252,8 @@ impl ForceDeallocPolicy for DeallocYoungest { #[zenoh_macros::unstable_doc] pub struct DeallocEldest; impl ForceDeallocPolicy for DeallocEldest { - fn dealloc( - provider: &SharedMemoryProvider, + fn dealloc( + provider: &ShmProvider, ) -> bool { match provider.busy_list.lock().unwrap().pop_front() { Some(val) => { @@ -268,9 +268,9 @@ impl ForceDeallocPolicy for DeallocEldest { /// Trait for allocation policies #[zenoh_macros::unstable_doc] pub trait AllocPolicy { - fn alloc( + fn alloc( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult; } @@ -278,9 +278,9 @@ pub trait AllocPolicy { #[zenoh_macros::unstable_doc] #[async_trait] pub trait AsyncAllocPolicy: Send { - async fn alloc_async( + async fn alloc_async( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult; } @@ -288,9 +288,9 @@ pub trait AsyncAllocPolicy: Send { #[zenoh_macros::unstable_doc] pub struct JustAlloc; impl AllocPolicy for JustAlloc { - fn alloc( + fn alloc( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: 
&ShmProvider, ) -> ChunkAllocResult { provider.backend.alloc(layout) } @@ -313,9 +313,9 @@ where InnerPolicy: AllocPolicy, AltPolicy: AllocPolicy, { - fn alloc( + fn alloc( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult { let result = InnerPolicy::alloc(layout, provider); if let Err(ZAllocError::OutOfMemory) = result { @@ -345,9 +345,9 @@ where InnerPolicy: AllocPolicy, AltPolicy: AllocPolicy, { - fn alloc( + fn alloc( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult { let result = InnerPolicy::alloc(layout, provider); if let Err(ZAllocError::NeedDefragment) = result { @@ -384,9 +384,9 @@ where AltPolicy: AllocPolicy, DeallocatePolicy: ForceDeallocPolicy, { - fn alloc( + fn alloc( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult { let mut result = InnerPolicy::alloc(layout, provider); for _ in 0..N { @@ -422,12 +422,9 @@ impl AsyncAllocPolicy for BlockOn where InnerPolicy: AllocPolicy + Send, { - async fn alloc_async< - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend + Sync, - >( + async fn alloc_async( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult { loop { match InnerPolicy::alloc(layout, provider) { @@ -446,9 +443,9 @@ impl AllocPolicy for BlockOn where InnerPolicy: AllocPolicy, { - fn alloc( + fn alloc( layout: &MemoryLayout, - provider: &SharedMemoryProvider, + provider: &ShmProvider, ) -> ChunkAllocResult { loop { match InnerPolicy::alloc(layout, provider) { @@ -469,14 +466,14 @@ where 'a, Policy: AllocPolicy, IDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, > { - provider: &'a SharedMemoryProvider, - allocations: lockfree::map::Map, SharedMemoryBuf>, + provider: &'a ShmProvider, + allocations: lockfree::map::Map, ShmBufInner>, _phantom: PhantomData, } -impl<'a, Policy: 
AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> +impl<'a, Policy: AllocPolicy, IDSource, Backend: ShmProviderBackend> ShmAllocator<'a, Policy, IDSource, Backend> { fn allocate(&self, layout: std::alloc::Layout) -> BufAllocResult { @@ -490,7 +487,7 @@ impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> } } -unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> +unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: ShmProviderBackend> allocator_api2::alloc::Allocator for ShmAllocator<'a, Policy, IDSource, Backend> { fn allocate( @@ -520,7 +517,7 @@ unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBack pub struct AllocBuilder2< 'a, IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, Policy = JustAlloc, > { data: AllocData<'a, IDSource, Backend>, @@ -531,7 +528,7 @@ pub struct AllocBuilder2< impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { /// Set the allocation policy #[zenoh_macros::unstable_doc] @@ -546,7 +543,7 @@ where impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder2<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { type To = BufLayoutAllocResult; } @@ -555,7 +552,7 @@ where impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder2<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, Policy: AllocPolicy, { fn wait(self) -> ::To { @@ -573,7 +570,7 @@ where impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder2<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend + Sync, + Backend: ShmProviderBackend + Sync, Policy: AsyncAllocPolicy, { type 
Output = ::To; @@ -599,7 +596,7 @@ where pub struct AllocBuilder< 'a, IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, Policy = JustAlloc, > { layout: &'a AllocLayout<'a, IDSource, Backend>, @@ -610,7 +607,7 @@ pub struct AllocBuilder< impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { /// Set the allocation policy #[zenoh_macros::unstable_doc] @@ -625,7 +622,7 @@ where impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { type To = BufAllocResult; } @@ -634,7 +631,7 @@ where impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, Policy: AllocPolicy, { fn wait(self) -> ::To { @@ -648,7 +645,7 @@ where impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend + Sync, + Backend: ShmProviderBackend + Sync, Policy: AsyncAllocPolicy, { type Output = ::To; @@ -668,9 +665,9 @@ where } #[zenoh_macros::unstable_doc] -pub struct SharedMemoryProviderBuilder; -impl SharedMemoryProviderBuilder { - /// Get the builder to construct SharedMemoryProvider +pub struct ShmProviderBuilder; +impl ShmProviderBuilder { + /// Get the builder to construct ShmProvider #[zenoh_macros::unstable_doc] pub fn builder() -> Self { Self @@ -678,38 +675,33 @@ impl SharedMemoryProviderBuilder { /// Set compile-time-evaluated protocol ID (preferred) #[zenoh_macros::unstable_doc] - pub fn protocol_id( - self, - ) -> SharedMemoryProviderBuilderID> { - SharedMemoryProviderBuilderID::> { + pub fn protocol_id(self) -> 
ShmProviderBuilderID> { + ShmProviderBuilderID::> { id: StaticProtocolID, } } /// Set runtime-evaluated protocol ID #[zenoh_macros::unstable_doc] - pub fn dynamic_protocol_id( - self, - id: ProtocolID, - ) -> SharedMemoryProviderBuilderID { - SharedMemoryProviderBuilderID:: { + pub fn dynamic_protocol_id(self, id: ProtocolID) -> ShmProviderBuilderID { + ShmProviderBuilderID:: { id: DynamicProtocolID::new(id), } } } #[zenoh_macros::unstable_doc] -pub struct SharedMemoryProviderBuilderID { +pub struct ShmProviderBuilderID { id: IDSource, } -impl SharedMemoryProviderBuilderID { +impl ShmProviderBuilderID { /// Set the backend #[zenoh_macros::unstable_doc] - pub fn backend( + pub fn backend( self, backend: Backend, - ) -> SharedMemoryProviderBuilderBackendID { - SharedMemoryProviderBuilderBackendID { + ) -> ShmProviderBuilderBackendID { + ShmProviderBuilderBackendID { backend, id: self.id, } @@ -717,34 +709,34 @@ impl SharedMemoryProviderBuilderID { } #[zenoh_macros::unstable_doc] -pub struct SharedMemoryProviderBuilderBackendID +pub struct ShmProviderBuilderBackendID where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { backend: Backend, id: IDSource, } -impl SharedMemoryProviderBuilderBackendID +impl ShmProviderBuilderBackendID where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { - /// build SharedMemoryProvider + /// build ShmProvider #[zenoh_macros::unstable_doc] - pub fn res(self) -> SharedMemoryProvider { - SharedMemoryProvider::new(self.backend, self.id) + pub fn res(self) -> ShmProvider { + ShmProvider::new(self.backend, self.id) } } -/// Trait to create ProtocolID sources for SharedMemoryProvider +/// Trait to create ProtocolID sources for ShmProvider #[zenoh_macros::unstable_doc] pub trait ProtocolIDSource: Send + Sync { fn id(&self) -> ProtocolID; } /// Static ProtocolID source. 
This is a recommended API to set ProtocolID -/// when creating SharedMemoryProvider as the ID value is statically evaluated +/// when creating ShmProvider as the ID value is statically evaluated /// at compile-time and can be optimized. #[zenoh_macros::unstable_doc] #[derive(Default)] @@ -756,7 +748,7 @@ impl ProtocolIDSource for StaticProtocolID { } /// Dynamic ProtocolID source. This is an alternative API to set ProtocolID -/// when creating SharedMemoryProvider for cases where ProtocolID is unknown +/// when creating ShmProvider for cases where ProtocolID is unknown /// at compile-time. #[zenoh_macros::unstable_doc] pub struct DynamicProtocolID { @@ -779,20 +771,20 @@ unsafe impl Sync for DynamicProtocolID {} /// A generalized interface for shared memory data sources #[zenoh_macros::unstable_doc] #[derive(Debug)] -pub struct SharedMemoryProvider +pub struct ShmProvider where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { backend: Backend, busy_list: Mutex>, id: IDSource, } -impl SharedMemoryProvider +impl ShmProvider where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { /// Rich interface for making allocations #[zenoh_macros::unstable_doc] @@ -814,7 +806,7 @@ where // allocate resources for SHM buffer let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; - // wrap everything to SharedMemoryBuf + // wrap everything to ShmBufInner let wrapped = self.wrap( chunk, len, @@ -863,10 +855,10 @@ where } // PRIVATE impls -impl SharedMemoryProvider +impl ShmProvider where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, + Backend: ShmProviderBackend, { fn new(backend: Backend, id: IDSource) -> Self { Self { @@ -891,7 +883,7 @@ where // and it is necessary to handle that properly and pass this len to corresponding free(...) 
let chunk = Policy::alloc(layout, self)?; - // wrap allocated chunk to SharedMemoryBuf + // wrap allocated chunk to ShmBufInner let wrapped = self.wrap( chunk, size, @@ -926,7 +918,7 @@ where allocated_header: AllocatedHeaderDescriptor, allocated_watchdog: AllocatedWatchdog, confirmed_watchdog: ConfirmedDescriptor, - ) -> SharedMemoryBuf { + ) -> ShmBufInner { let header = allocated_header.descriptor.clone(); let descriptor = Descriptor::from(&allocated_watchdog.descriptor); @@ -943,7 +935,7 @@ where ); // Create buffer's info - let info = SharedMemoryBufInfo::new( + let info = ShmBufInfo::new( chunk.descriptor.clone(), self.id.id(), len, @@ -953,7 +945,7 @@ where ); // Create buffer - let shmb = SharedMemoryBuf { + let shmb = ShmBufInner { header, buf: chunk.data, info, @@ -972,10 +964,10 @@ where } // PRIVATE impls for Sync backend -impl SharedMemoryProvider +impl ShmProvider where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend + Sync, + Backend: ShmProviderBackend + Sync, { async fn alloc_inner_async( &self, @@ -996,7 +988,7 @@ where // and it is necessary to handle that properly and pass this len to corresponding free(...) 
let chunk = Policy::alloc_async(backend_layout, self).await?; - // wrap allocated chunk to SharedMemoryBuf + // wrap allocated chunk to ShmBufInner let wrapped = self.wrap( chunk, size, diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs similarity index 97% rename from commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs rename to commons/zenoh-shm/src/api/provider/shm_provider_backend.rs index cd15ce3720..0487981e5c 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs +++ b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs @@ -22,7 +22,7 @@ use super::{ /// The provider backend trait /// Implemet this interface to create a Zenoh-compatible shared memory provider #[zenoh_macros::unstable_doc] -pub trait SharedMemoryProviderBackend { +pub trait ShmProviderBackend { /// Allocate the chunk of desired size. /// If successful, the result's chunk size will be >= len #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 316477d26e..eec962a7e4 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -50,14 +50,14 @@ pub mod posix_shm; pub mod reader; pub mod watchdog; -/// Informations about a [`SharedMemoryBuf`]. +/// Informations about a [`ShmBufInner`]. /// -/// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. +/// This that can be serialized and can be used to retrieve the [`ShmBufInner`] in a remote process. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct SharedMemoryBufInfo { +pub struct ShmBufInfo { /// The data chunk descriptor pub data_descriptor: ChunkDescriptor, - /// Protocol identifier for particular SharedMemory implementation + /// Protocol identifier for particular SHM implementation pub shm_protocol: ProtocolID, /// Actual data length /// NOTE: data_descriptor's len is >= of this len and describes the actual memory length @@ -72,7 +72,7 @@ pub struct SharedMemoryBufInfo { pub generation: u32, } -impl SharedMemoryBufInfo { +impl ShmBufInfo { pub fn new( data_descriptor: ChunkDescriptor, shm_protocol: ProtocolID, @@ -80,8 +80,8 @@ impl SharedMemoryBufInfo { watchdog_descriptor: Descriptor, header_descriptor: HeaderDescriptor, generation: u32, - ) -> SharedMemoryBufInfo { - SharedMemoryBufInfo { + ) -> ShmBufInfo { + ShmBufInfo { data_descriptor, shm_protocol, data_len, @@ -94,14 +94,14 @@ impl SharedMemoryBufInfo { /// A zenoh buffer in shared memory. #[non_exhaustive] -pub struct SharedMemoryBuf { +pub struct ShmBufInner { pub(crate) header: OwnedHeaderDescriptor, pub(crate) buf: AtomicPtr, - pub info: SharedMemoryBufInfo, + pub info: ShmBufInfo, pub(crate) watchdog: Arc, } -impl PartialEq for SharedMemoryBuf { +impl PartialEq for ShmBufInner { fn eq(&self, other: &Self) -> bool { // currently there is no API to resize an SHM buffer, but it is intended in the future, // so I add size comparsion here to avoid future bugs :) @@ -109,11 +109,11 @@ impl PartialEq for SharedMemoryBuf { && self.info.data_len == other.info.data_len } } -impl Eq for SharedMemoryBuf {} +impl Eq for ShmBufInner {} -impl std::fmt::Debug for SharedMemoryBuf { +impl std::fmt::Debug for ShmBufInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryBuf") + f.debug_struct("ShmBufInner") .field("header", &self.header) .field("buf", &self.buf) .field("info", &self.info) @@ -121,7 +121,7 @@ impl std::fmt::Debug for SharedMemoryBuf { } } 
-impl SharedMemoryBuf { +impl ShmBufInner { pub fn len(&self) -> usize { self.info.data_len } @@ -154,10 +154,7 @@ impl SharedMemoryBuf { // PRIVATE: fn as_slice(&self) -> &[u8] { - tracing::trace!( - "SharedMemoryBuf::as_slice() == len = {:?}", - self.info.data_len - ); + tracing::trace!("ShmBufInner::as_slice() == len = {:?}", self.info.data_len); let bp = self.buf.load(Ordering::SeqCst); unsafe { std::slice::from_raw_parts(bp, self.info.data_len) } } @@ -183,21 +180,21 @@ impl SharedMemoryBuf { } } -impl Drop for SharedMemoryBuf { +impl Drop for ShmBufInner { fn drop(&mut self) { // # Safety - // obviouly, we need to decrement refcount when dropping SharedMemoryBuf instance + // obviouly, we need to decrement refcount when dropping ShmBufInner instance unsafe { self.dec_ref_count() }; } } -impl Clone for SharedMemoryBuf { +impl Clone for ShmBufInner { fn clone(&self) -> Self { // # Safety - // obviouly, we need to increment refcount when cloning SharedMemoryBuf instance + // obviouly, we need to increment refcount when cloning ShmBufInner instance unsafe { self.inc_ref_count() }; let bp = self.buf.load(Ordering::SeqCst); - SharedMemoryBuf { + ShmBufInner { header: self.header.clone(), buf: AtomicPtr::new(bp), info: self.info.clone(), @@ -207,20 +204,20 @@ impl Clone for SharedMemoryBuf { } // Buffer impls -// - SharedMemoryBuf -impl AsRef<[u8]> for SharedMemoryBuf { +// - ShmBufInner +impl AsRef<[u8]> for ShmBufInner { fn as_ref(&self) -> &[u8] { self.as_slice() } } -impl AsMut<[u8]> for SharedMemoryBuf { +impl AsMut<[u8]> for ShmBufInner { fn as_mut(&mut self) -> &mut [u8] { unsafe { self.as_mut_slice_inner() } } } -impl ZSliceBuffer for SharedMemoryBuf { +impl ZSliceBuffer for ShmBufInner { fn as_slice(&self) -> &[u8] { self.as_ref() } diff --git a/commons/zenoh-shm/src/reader.rs b/commons/zenoh-shm/src/reader.rs index c2ce2303a9..1298c38aff 100644 --- a/commons/zenoh-shm/src/reader.rs +++ b/commons/zenoh-shm/src/reader.rs @@ -19,34 +19,34 @@ use 
zenoh_result::ZResult; use crate::{ api::{ - client::shared_memory_segment::SharedMemorySegment, - client_storage::SharedMemoryClientStorage, + client::shm_segment::ShmSegment, + client_storage::ShmClientStorage, common::types::{ProtocolID, SegmentID}, }, header::subscription::GLOBAL_HEADER_SUBSCRIPTION, watchdog::confirmator::GLOBAL_CONFIRMATOR, - SharedMemoryBuf, SharedMemoryBufInfo, + ShmBufInfo, ShmBufInner, }; #[derive(Debug, Clone, Eq, PartialEq)] -pub struct SharedMemoryReader { - client_storage: Arc, +pub struct ShmReader { + client_storage: Arc, } -impl Deref for SharedMemoryReader { - type Target = SharedMemoryClientStorage; +impl Deref for ShmReader { + type Target = ShmClientStorage; fn deref(&self) -> &Self::Target { &self.client_storage } } -impl SharedMemoryReader { - pub fn new(client_storage: Arc) -> Self { +impl ShmReader { + pub fn new(client_storage: Arc) -> Self { Self { client_storage } } - pub fn read_shmbuf(&self, info: &SharedMemoryBufInfo) -> ZResult { + pub fn read_shmbuf(&self, info: &ShmBufInfo) -> ZResult { // Read does not increment the reference count as it is assumed // that the sender of this buffer has incremented it for us. 
@@ -54,7 +54,7 @@ impl SharedMemoryReader { let watchdog = Arc::new(GLOBAL_CONFIRMATOR.add(&info.watchdog_descriptor)?); let segment = self.ensure_segment(info)?; - let shmb = SharedMemoryBuf { + let shmb = ShmBufInner { header: GLOBAL_HEADER_SUBSCRIPTION.link(&info.header_descriptor)?, buf: segment.map(info.data_descriptor.chunk)?, info: info.clone(), @@ -68,7 +68,7 @@ impl SharedMemoryReader { } } - fn ensure_segment(&self, info: &SharedMemoryBufInfo) -> ZResult> { + fn ensure_segment(&self, info: &ShmBufInfo) -> ZResult> { let id = GlobalDataSegmentID::new(info.shm_protocol, info.data_descriptor.segment); // fastest path: try to get access to already mounted SHM segment diff --git a/commons/zenoh-shm/tests/posix_shm_provider.rs b/commons/zenoh-shm/tests/posix_shm_provider.rs index 4c27879623..60104be6cf 100644 --- a/commons/zenoh-shm/tests/posix_shm_provider.rs +++ b/commons/zenoh-shm/tests/posix_shm_provider.rs @@ -13,13 +13,12 @@ // use zenoh_shm::api::{ - client::shared_memory_client::SharedMemoryClient, + client::shm_client::ShmClient, protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + posix_shm_client::PosixShmClient, posix_shm_provider_backend::PosixShmProviderBackend, }, provider::{ - shared_memory_provider_backend::SharedMemoryProviderBackend, + shm_provider_backend::ShmProviderBackend, types::{AllocAlignment, MemoryLayout}, }, }; @@ -29,43 +28,43 @@ static BUFFER_SIZE: usize = 1024; #[test] fn posix_shm_provider_create() { - let _backend = PosixSharedMemoryProviderBackend::builder() + let _backend = PosixShmProviderBackend::builder() .with_size(1024) .expect("Error creating Layout!") .res() - .expect("Error creating PosixSharedMemoryProviderBackend!"); + .expect("Error creating PosixShmProviderBackend!"); } #[test] fn posix_shm_provider_alloc() { - let backend = PosixSharedMemoryProviderBackend::builder() + let backend = 
PosixShmProviderBackend::builder() .with_size(1024) .expect("Error creating Layout!") .res() - .expect("Error creating PosixSharedMemoryProviderBackend!"); + .expect("Error creating PosixShmProviderBackend!"); let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); let _buf = backend .alloc(&layout) - .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + .expect("PosixShmProviderBackend: error allocating buffer"); } #[test] fn posix_shm_provider_open() { - let backend = PosixSharedMemoryProviderBackend::builder() + let backend = PosixShmProviderBackend::builder() .with_size(1024) .expect("Error creating Layout!") .res() - .expect("Error creating PosixSharedMemoryProviderBackend!"); + .expect("Error creating PosixShmProviderBackend!"); let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); let buf = backend .alloc(&layout) - .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + .expect("PosixShmProviderBackend: error allocating buffer"); - let client = PosixSharedMemoryClient {}; + let client = PosixShmClient {}; let _segment = client .attach(buf.descriptor.segment) @@ -74,11 +73,11 @@ fn posix_shm_provider_open() { #[test] fn posix_shm_provider_allocator() { - let backend = PosixSharedMemoryProviderBackend::builder() + let backend = PosixShmProviderBackend::builder() .with_size(BUFFER_SIZE * BUFFER_NUM) .expect("Error creating Layout!") .res() - .expect("Error creating PosixSharedMemoryProviderBackend!"); + .expect("Error creating PosixShmProviderBackend!"); let layout = MemoryLayout::new(BUFFER_SIZE, AllocAlignment::default()).unwrap(); @@ -87,7 +86,7 @@ fn posix_shm_provider_allocator() { for _ in 0..BUFFER_NUM { let buf = backend .alloc(&layout) - .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + .expect("PosixShmProviderBackend: error allocating buffer"); buffers.push(buf); } @@ -103,7 +102,7 @@ fn posix_shm_provider_allocator() { // allocate new one let buf = backend 
.alloc(&layout) - .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + .expect("PosixShmProviderBackend: error allocating buffer"); buffers.push(buf); } diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 4423e0b07a..1beabaebd8 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -14,8 +14,8 @@ use zenoh::{ prelude::*, shm::{ - AllocAlignment, BlockOn, Deallocate, Defragment, GarbageCollect, - PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, + AllocAlignment, BlockOn, Deallocate, Defragment, GarbageCollect, PosixShmProviderBackend, + ShmProviderBuilder, POSIX_PROTOCOL_ID, }, Config, }; @@ -29,14 +29,14 @@ async fn main() { async fn run() -> ZResult<()> { // create an SHM backend... - // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(65536) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -90,7 +90,7 @@ async fn run() -> ZResult<()> { simple_layout }; - // Allocate SharedMemoryBuf + // Allocate ShmBufInner // Policy is a generics-based API to describe necessary allocation behaviour // that will be higly optimized at compile-time. // Policy resolvable can be sync and async. 
diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 66d47193ae..75bf01e3bf 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -15,21 +15,21 @@ use zenoh::{ bytes::ZBytes, prelude::*, shm::{ - zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, - ZShmMut, POSIX_PROTOCOL_ID, + zshm, zshmmut, PosixShmProviderBackend, ShmProviderBuilder, ZShm, ZShmMut, + POSIX_PROTOCOL_ID, }, }; fn main() { // create an SHM backend... - // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(4096) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 942ec0e34e..17f4e40e5b 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -19,8 +19,8 @@ use zenoh::{ query::QueryTarget, selector::Selector, shm::{ - zshm, BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, - SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, + zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, + POSIX_PROTOCOL_ID, }, Config, }; @@ -45,14 +45,14 @@ async fn main() { println!("Creating POSIX SHM provider..."); // create an SHM backend... 
- // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(N * 1024) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -69,7 +69,7 @@ async fn main() { let content = value .take() - .unwrap_or_else(|| "Get from SharedMemory Rust!".to_string()); + .unwrap_or_else(|| "Get from SHM Rust!".to_string()); sbuf[0..content.len()].copy_from_slice(content.as_bytes()); println!("Sending Query '{selector}'..."); @@ -87,7 +87,7 @@ async fn main() { print!(">> Received ('{}': ", sample.key_expr().as_str()); match sample.payload().deserialize::<&zshm>() { Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), - Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), + Err(e) => println!("'Not a ShmBufInner: {:?}')", e), } } Err(err) => { diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index c0cc20127d..033fe2d844 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -19,7 +19,7 @@ use zenoh::{ key_expr::keyexpr, prelude::*, publisher::CongestionControl, - shm::{PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID}, + shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, Config, }; use zenoh_examples::CommonArgs; @@ -53,14 +53,14 @@ fn main() { let mut samples = Vec::with_capacity(n); // create an SHM backend... 
- // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(size) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs index d89d419846..7c68d56bd3 100644 --- a/examples/examples/z_posix_shm_provider.rs +++ b/examples/examples/z_posix_shm_provider.rs @@ -12,14 +12,13 @@ // ZettaScale Zenoh Team, // use zenoh::shm::{ - AllocAlignment, MemoryLayout, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, - POSIX_PROTOCOL_ID, + AllocAlignment, MemoryLayout, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, }; fn main() { // Construct an SHM backend let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // NOTE: code in this block is a specific PosixShmProviderBackend API. 
// Total amount of shared memory to allocate let size = 4096; @@ -33,14 +32,14 @@ fn main() { let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); // Build a provider backend - PosixSharedMemoryProviderBackend::builder() + PosixShmProviderBackend::builder() .with_layout(provider_layout) .res() .unwrap() }; // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let _shared_memory_provider = SharedMemoryProviderBuilder::builder() + let _shm_provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 9c4e64c496..dfb6fb44a6 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -16,8 +16,7 @@ use zenoh::{ key_expr::KeyExpr, prelude::*, shm::{ - BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, - POSIX_PROTOCOL_ID, + BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, }, Config, }; @@ -42,14 +41,14 @@ async fn main() -> Result<(), ZError> { println!("Creating POSIX SHM provider..."); // create an SHM backend... - // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(N * 1024) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -97,7 +96,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] /// The key expression to publish onto. 
path: KeyExpr<'static>, - #[arg(short, long, default_value = "Pub from SharedMemory Rust!")] + #[arg(short, long, default_value = "Pub from SHM Rust!")] /// The value of to publish. value: String, #[command(flatten)] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index fca2994d33..cff095024e 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -16,7 +16,7 @@ use zenoh::{ internal::buffers::ZSlice, prelude::*, publisher::CongestionControl, - shm::{PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID}, + shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, Config, }; use zenoh_examples::CommonArgs; @@ -35,14 +35,14 @@ async fn main() { let z = zenoh::open(config).await.unwrap(); // create an SHM backend... - // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(sm_size) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index ec2058c897..c76a031286 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -16,8 +16,8 @@ use zenoh::{ key_expr::KeyExpr, prelude::*, shm::{ - zshm, BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, - SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, + zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, + POSIX_PROTOCOL_ID, }, Config, }; @@ -42,14 +42,14 @@ async fn main() { println!("Creating POSIX SHM provider..."); // create an SHM backend... 
- // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs - let backend = PosixSharedMemoryProviderBackend::builder() + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() .with_size(N * 1024) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -71,7 +71,7 @@ async fn main() { if let Some(payload) = query.payload() { match payload.deserialize::<&zshm>() { Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), - Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), + Err(e) => print!(": 'Not a ShmBufInner: {:?}'", e), } } println!(")"); @@ -105,7 +105,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] /// The key expression matching queries to reply to. key: KeyExpr<'static>, - #[arg(short, long, default_value = "Queryable from SharedMemory Rust!")] + #[arg(short, long, default_value = "Queryable from SHM Rust!")] /// The value to reply to queries. 
value: String, #[arg(long)] diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 4cc797d8b4..e32c6140ac 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -42,7 +42,7 @@ async fn main() { ); match sample.payload().deserialize::<&zshm>() { Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), - Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), + Err(e) => print!("'Not a ShmBufInner: {:?}'", e), } println!(")"); } @@ -62,7 +62,7 @@ async fn main() { // kind, key_expr, payload // ), // Err(e) => { - // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // println!(">> [Subscriber] Not a ShmBufInner: {:?}", e); // } // } // } diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 7d5e8f0885..f578e4d4fa 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -27,7 +27,7 @@ use zenoh_result::{bail, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::api::client_storage::GLOBAL_CLIENT_STORAGE; #[cfg(feature = "shared-memory")] -use zenoh_shm::reader::SharedMemoryReader; +use zenoh_shm::reader::ShmReader; use zenoh_task::TaskController; use super::{ @@ -140,12 +140,12 @@ pub struct TransportManagerBuilder { tx_threads: usize, protocols: Option>, #[cfg(feature = "shared-memory")] - shm_reader: Option, + shm_reader: Option, } impl TransportManagerBuilder { #[cfg(feature = "shared-memory")] - pub fn shm_reader(mut self, shm_reader: Option) -> Self { + pub fn shm_reader(mut self, shm_reader: Option) -> Self { self.shm_reader = shm_reader; self } @@ -268,7 +268,7 @@ impl TransportManagerBuilder { #[cfg(feature = "shared-memory")] let shm_reader = self .shm_reader - .unwrap_or_else(|| SharedMemoryReader::new(GLOBAL_CLIENT_STORAGE.clone())); + .unwrap_or_else(|| ShmReader::new(GLOBAL_CLIENT_STORAGE.clone())); let unicast = self.unicast.build( &mut prng, @@ -364,7 +364,7 @@ pub struct TransportManager { pub(crate) 
locator_inspector: zenoh_link::LocatorInspector, pub(crate) new_unicast_link_sender: NewLinkChannelSender, #[cfg(feature = "shared-memory")] - pub(crate) shmr: SharedMemoryReader, + pub(crate) shmr: ShmReader, #[cfg(feature = "stats")] pub(crate) stats: Arc, pub(crate) task_controller: TaskController, @@ -374,7 +374,7 @@ impl TransportManager { pub fn new( params: TransportManagerParams, mut prng: PseudoRng, - #[cfg(feature = "shared-memory")] shmr: SharedMemoryReader, + #[cfg(feature = "shared-memory")] shmr: ShmReader, ) -> TransportManager { // Initialize the Cipher let mut key = [0_u8; BlockCipher::BLOCK_SIZE]; diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 3c04cf6425..d95a5ff8c5 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -17,7 +17,7 @@ use tokio::sync::Mutex; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] -use zenoh_config::SharedMemoryConf; +use zenoh_config::ShmConf; use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; @@ -152,7 +152,7 @@ impl Default for TransportManagerBuilderMulticast { fn default() -> TransportManagerBuilderMulticast { let link_tx = LinkTxConf::default(); #[cfg(feature = "shared-memory")] - let shm = SharedMemoryConf::default(); + let shm = ShmConf::default(); #[cfg(feature = "transport_compression")] let compression = CompressionMulticastConf::default(); diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 7a50a68742..8450ad878e 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -26,10 +26,7 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; -use zenoh_shm::{ - api::common::types::ProtocolID, reader::SharedMemoryReader, SharedMemoryBuf, - SharedMemoryBufInfo, -}; +use zenoh_shm::{api::common::types::ProtocolID, reader::ShmReader, 
ShmBufInfo, ShmBufInner}; use crate::unicast::establishment::ext::shm::AuthSegment; @@ -84,7 +81,7 @@ pub fn map_zmsg_to_partner( } } -pub fn map_zmsg_to_shmbuf(msg: &mut NetworkMessage, shmr: &SharedMemoryReader) -> ZResult<()> { +pub fn map_zmsg_to_shmbuf(msg: &mut NetworkMessage, shmr: &ShmReader) -> ZResult<()> { match &mut msg.body { NetworkBody::Push(Push { payload, .. }) => match payload { PushBody::Put(b) => b.map_to_shmbuf(shmr), @@ -117,7 +114,7 @@ trait MapShm { // RX: // - shminfo -> shmbuf // - rawbuf -> rawbuf (no changes) - fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()>; + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()>; // TX: // - shmbuf -> shminfo if partner supports shmbuf's SHM protocol @@ -170,7 +167,7 @@ impl MapShm for Put { map_to_partner!(payload, ext_shm, partner_shm_cfg) } - fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; @@ -197,7 +194,7 @@ impl MapShm for Query { } } - fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. @@ -229,7 +226,7 @@ impl MapShm for Reply { } } - fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { match &mut self.payload { PushBody::Put(put) => { let Put { @@ -254,7 +251,7 @@ impl MapShm for Err { map_to_partner!(payload, ext_shm, partner_shm_cfg) } - fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { let Self { payload, ext_shm, .. 
} = self; @@ -264,7 +261,7 @@ impl MapShm for Err { #[cold] #[inline(never)] -pub fn shmbuf_to_rawbuf(shmb: &SharedMemoryBuf) -> ZSlice { +pub fn shmbuf_to_rawbuf(shmb: &ShmBufInner) -> ZSlice { // Convert shmb to raw buffer // TODO: optimize this! We should not make additional buffer copy here, // but we need to make serializer serialize SHM buffer as raw buffer. @@ -273,7 +270,7 @@ pub fn shmbuf_to_rawbuf(shmb: &SharedMemoryBuf) -> ZSlice { #[cold] #[inline(never)] -pub fn shmbuf_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { +pub fn shmbuf_to_shminfo(shmb: &ShmBufInner) -> ZResult { // Serialize the shmb info let codec = Zenoh080::new(); let mut info = vec![]; @@ -281,7 +278,7 @@ pub fn shmbuf_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { codec .write(&mut writer, &shmb.info) .map_err(|e| zerror!("{:?}", e))?; - // Increase the reference count so to keep the SharedMemoryBuf valid + // Increase the reference count so to keep the ShmBufInner valid unsafe { shmb.inc_ref_count() }; // Replace the content of the slice let mut zslice: ZSlice = info.into(); @@ -295,7 +292,7 @@ fn to_shm_partner( ) -> ZResult { let mut res = false; for zs in zbuf.zslices_mut() { - if let Some(shmb) = zs.downcast_ref::() { + if let Some(shmb) = zs.downcast_ref::() { if partner_shm_cfg.supports_protocol(shmb.info.shm_protocol) { *zs = shmbuf_to_shminfo(shmb)?; res = true; @@ -310,14 +307,14 @@ fn to_shm_partner( fn to_non_shm_partner(zbuf: &mut ZBuf) { for zs in zbuf.zslices_mut() { - if let Some(shmb) = zs.downcast_ref::() { + if let Some(shmb) = zs.downcast_ref::() { // Replace the content of the slice with rawbuf *zs = shmbuf_to_rawbuf(shmb) } } } -pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &SharedMemoryReader) -> ZResult<()> { +pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &ShmReader) -> ZResult<()> { for zs in zbuf.zslices_mut().filter(|x| x.kind == ZSliceKind::ShmPtr) { map_zslice_to_shmbuf(zs, shmr)?; } @@ -326,12 +323,12 @@ pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, 
shmr: &SharedMemoryReader) -> ZResult #[cold] #[inline(never)] -pub fn map_zslice_to_shmbuf(zslice: &mut ZSlice, shmr: &SharedMemoryReader) -> ZResult<()> { +pub fn map_zslice_to_shmbuf(zslice: &mut ZSlice, shmr: &ShmReader) -> ZResult<()> { let codec = Zenoh080::new(); let mut reader = zslice.reader(); // Deserialize the shminfo - let shmbinfo: SharedMemoryBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let shmbinfo: ShmBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; // Mount shmbuf let smb = shmr.read_shmbuf(&shmbinfo)?; diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index f42002b0d3..a9082ce705 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -24,7 +24,7 @@ use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] -use zenoh_config::SharedMemoryConf; +use zenoh_config::ShmConf; use zenoh_config::{Config, LinkTxConf, QoSUnicastConf, TransportUnicastConf}; use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; @@ -35,7 +35,7 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; #[cfg(feature = "shared-memory")] -use zenoh_shm::reader::SharedMemoryReader; +use zenoh_shm::reader::ShmReader; #[cfg(feature = "shared-memory")] use super::establishment::ext::shm::AuthUnicast; @@ -216,7 +216,7 @@ impl TransportManagerBuilderUnicast { pub fn build( self, #[allow(unused)] prng: &mut PseudoRng, // Required for #[cfg(feature = "transport_multilink")] - #[cfg(feature = "shared-memory")] shm_reader: &SharedMemoryReader, + #[cfg(feature = "shared-memory")] shm_reader: &ShmReader, ) -> ZResult { if self.is_qos && self.is_lowlatency { bail!("'qos' and 'lowlatency' options are incompatible"); @@ -267,7 +267,7 @@ impl Default for TransportManagerBuilderUnicast { let link_tx 
= LinkTxConf::default(); let qos = QoSUnicastConf::default(); #[cfg(feature = "shared-memory")] - let shm = SharedMemoryConf::default(); + let shm = ShmConf::default(); #[cfg(feature = "transport_compression")] let compression = CompressionUnicastConf::default(); diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 1b2369e620..5ec01f9290 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -38,14 +38,11 @@ mod tests { use zenoh_shm::{ api::{ protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::{ - BlockOn, GarbageCollect, SharedMemoryProviderBuilder, + posix_shm_provider_backend::PosixShmProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, + provider::shm_provider::{BlockOn, GarbageCollect, ShmProviderBuilder}, }, - SharedMemoryBuf, + ShmBufInner, }; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, @@ -118,11 +115,10 @@ mod tests { NetworkBody::Push(m) => match m.payload { PushBody::Put(Put { payload, .. 
}) => { for zs in payload.zslices() { - if self.is_shm && zs.downcast_ref::().is_none() { - panic!("Expected SharedMemoryBuf: {:?}", zs); - } else if !self.is_shm && zs.downcast_ref::().is_some() - { - panic!("Not Expected SharedMemoryBuf: {:?}", zs); + if self.is_shm && zs.downcast_ref::().is_none() { + panic!("Expected ShmBufInner: {:?}", zs); + } else if !self.is_shm && zs.downcast_ref::().is_some() { + panic!("Not Expected ShmBufInner: {:?}", zs); } } payload.contiguous().into_owned() @@ -162,12 +158,12 @@ mod tests { let peer_net01 = ZenohId::try_from([3]).unwrap(); // create SHM provider - let backend = PosixSharedMemoryProviderBackend::builder() + let backend = PosixShmProviderBackend::builder() .with_size(2 * MSG_SIZE) .unwrap() .res() .unwrap(); - let shm01 = SharedMemoryProviderBuilder::builder() + let shm01 = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index c4bcf6ae5e..76ffdc1650 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -34,7 +34,7 @@ use zenoh_shm::{ zshm::{zshm, ZShm}, zshmmut::{zshmmut, ZShmMut}, }, - SharedMemoryBuf, + ShmBufInner, }; /// Trait to encode a type `T` into a [`Value`]. 
@@ -1613,7 +1613,7 @@ impl<'a> Deserialize<'a, &'a zshm> for ZSerde { // A ZShm is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { + if let Some(shmb) = zs.downcast_ref::() { return Ok(shmb.into()); } } @@ -1648,7 +1648,7 @@ impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { + if let Some(shmb) = zs.downcast_mut::() { return Ok(shmb.into()); } } @@ -1665,7 +1665,7 @@ impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { + if let Some(shmb) = zs.downcast_mut::() { return shmb.try_into().map_err(|_| ZDeserializeError); } } @@ -1882,10 +1882,9 @@ mod tests { use zenoh_shm::api::{ buffer::zshm::{zshm, ZShm}, protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, + posix_shm_provider_backend::PosixShmProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + provider::shm_provider::ShmProviderBuilder, }; use super::ZBytes; @@ -1995,13 +1994,13 @@ mod tests { #[cfg(feature = "shared-memory")] { // create an SHM backend... 
- let backend = PosixSharedMemoryProviderBackend::builder() + let backend = PosixShmProviderBackend::builder() .with_size(4096) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 187ec27be7..db02cd9649 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -56,7 +56,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; #[cfg(feature = "shared-memory")] -use zenoh_shm::api::client_storage::SharedMemoryClientStorage; +use zenoh_shm::api::client_storage::ShmClientStorage; use zenoh_task::TaskController; use super::{ @@ -839,7 +839,7 @@ impl Session { #[allow(clippy::new_ret_no_self)] pub(super) fn new( config: Config, - #[cfg(feature = "shared-memory")] shm_clients: Option>, + #[cfg(feature = "shared-memory")] shm_clients: Option>, ) -> impl Resolve> { ResolveFuture::new(async move { tracing::debug!("Config: {:?}", &config); @@ -2733,7 +2733,7 @@ where { config: TryIntoConfig, #[cfg(feature = "shared-memory")] - shm_clients: Option>, + shm_clients: Option>, } #[cfg(feature = "shared-memory")] @@ -2742,7 +2742,7 @@ where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { + pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { self.shm_clients = Some(shm_clients); self } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 0c47070609..c1cb39fdee 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -414,30 +414,27 @@ pub mod shm { zshm::{zshm, ZShm}, zshmmut::{zshmmut, ZShmMut}, }, - client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }, - client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}, + client::{shm_client::ShmClient, shm_segment::ShmSegment}, + 
client_storage::{ShmClientStorage, GLOBAL_CLIENT_STORAGE}, common::types::{ChunkID, ProtocolID, SegmentID}, protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, - posix_shared_memory_provider_backend::{ - LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, - PosixSharedMemoryProviderBackendBuilder, + posix_shm_client::PosixShmClient, + posix_shm_provider_backend::{ + LayoutedPosixShmProviderBackendBuilder, PosixShmProviderBackend, + PosixShmProviderBackendBuilder, }, protocol_id::POSIX_PROTOCOL_ID, }, provider::{ chunk::{AllocatedChunk, ChunkDescriptor}, - shared_memory_provider::{ + shm_provider::{ AllocBuilder, AllocBuilder2, AllocLayout, AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, ForceDeallocPolicy, GarbageCollect, - JustAlloc, ProtocolIDSource, SharedMemoryProvider, SharedMemoryProviderBuilder, - SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, - StaticProtocolID, + JustAlloc, ProtocolIDSource, ShmProvider, ShmProviderBuilder, + ShmProviderBuilderBackendID, ShmProviderBuilderID, StaticProtocolID, }, - shared_memory_provider_backend::SharedMemoryProviderBackend, + shm_provider_backend::ShmProviderBackend, types::{ AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index d5b42ecdd2..81a904a3da 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -45,9 +45,9 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, ZResult}; #[cfg(feature = "shared-memory")] -use zenoh_shm::api::client_storage::SharedMemoryClientStorage; +use zenoh_shm::api::client_storage::ShmClientStorage; #[cfg(feature = "shared-memory")] -use zenoh_shm::reader::SharedMemoryReader; +use zenoh_shm::reader::ShmReader; use 
zenoh_sync::get_mut_unchecked; use zenoh_task::TaskController; use zenoh_transport::{ @@ -98,7 +98,7 @@ pub struct RuntimeBuilder { #[cfg(feature = "plugins")] plugins_manager: Option, #[cfg(feature = "shared-memory")] - shm_clients: Option>, + shm_clients: Option>, } impl RuntimeBuilder { @@ -119,7 +119,7 @@ impl RuntimeBuilder { } #[cfg(feature = "shared-memory")] - pub fn shm_clients(mut self, shm_clients: Option>) -> Self { + pub fn shm_clients(mut self, shm_clients: Option>) -> Self { self.shm_clients = shm_clients; self } @@ -157,7 +157,7 @@ impl RuntimeBuilder { #[cfg(feature = "unstable")] let transport_manager = zcondfeat!( "shared-memory", - transport_manager.shm_reader(shm_clients.map(SharedMemoryReader::new)), + transport_manager.shm_reader(shm_clients.map(ShmReader::new)), transport_manager ) .build(handler.clone())?; diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index e97475c237..34c9837d04 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -16,21 +16,21 @@ use zenoh::{ bytes::ZBytes, prelude::*, shm::{ - zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, - ZShmMut, POSIX_PROTOCOL_ID, + zshm, zshmmut, PosixShmProviderBackend, ShmProviderBuilder, ZShm, ZShmMut, + POSIX_PROTOCOL_ID, }, }; #[test] fn shm_bytes_single_buf() { // create an SHM backend... 
- let backend = PosixSharedMemoryProviderBackend::builder() + let backend = PosixShmProviderBackend::builder() .with_size(4096) .unwrap() .res() .unwrap(); // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() + let provider = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 81e5fdece1..c2cbc4e89a 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -26,8 +26,7 @@ use zenoh::{ prelude::*, publisher::CongestionControl, shm::{ - BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, - POSIX_PROTOCOL_ID, + BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, }, subscriber::Reliability, Session, @@ -117,13 +116,13 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re tokio::time::sleep(SLEEP).await; // create SHM backend... - let backend = PosixSharedMemoryProviderBackend::builder() + let backend = PosixShmProviderBackend::builder() .with_size(size * MSG_COUNT / 10) .unwrap() .res() .unwrap(); // ...and SHM provider - let shm01 = SharedMemoryProviderBuilder::builder() + let shm01 = ShmProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); From 438804f21158de96585004e44cb362ccf54a4045 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 9 Jun 2024 19:23:01 +0200 Subject: [PATCH 388/598] properties renamed to parameters, selector rework --- commons/zenoh-protocol/src/core/endpoint.rs | 46 +- commons/zenoh-protocol/src/core/mod.rs | 6 +- commons/zenoh-protocol/src/core/parameters.rs | 600 ++++++++++++++---- .../src/core/parameters_view.rs | 211 ++++++ commons/zenoh-protocol/src/core/properties.rs | 517 --------------- examples/examples/z_get_shm.rs | 6 +- examples/examples/z_storage.rs | 2 +- io/zenoh-links/zenoh-link-quic/src/utils.rs | 4 +- io/zenoh-links/zenoh-link-tls/src/utils.rs | 4 +- .../zenoh-link-unixpipe/src/unix/mod.rs | 4 +- 
io/zenoh-transport/src/multicast/manager.rs | 4 +- io/zenoh-transport/src/unicast/manager.rs | 6 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 27 +- .../zenoh-plugin-storage-manager/src/lib.rs | 25 +- .../src/replica/align_queryable.rs | 27 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 2 +- zenoh-ext/src/publication_cache.rs | 7 +- zenoh-ext/src/querying_subscriber.rs | 7 +- zenoh/src/api/bytes.rs | 48 +- zenoh/src/api/key_expr.rs | 1 + zenoh/src/api/liveliness.rs | 6 +- zenoh/src/api/plugins.rs | 8 +- zenoh/src/api/query.rs | 48 +- zenoh/src/api/queryable.rs | 9 +- zenoh/src/api/selector.rs | 240 +++---- zenoh/src/api/session.rs | 42 +- zenoh/src/lib.rs | 6 +- zenoh/src/net/runtime/adminspace.rs | 4 +- 30 files changed, 938 insertions(+), 983 deletions(-) create mode 100644 commons/zenoh-protocol/src/core/parameters_view.rs delete mode 100644 commons/zenoh-protocol/src/core/properties.rs diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 1c9ccffb40..3c6f3dad1b 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -16,7 +16,7 @@ use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; -use super::{locator::*, parameters::Parameters}; +use super::{locator::*, parameters_view::ParametersView}; // Parsing chars pub const PROTO_SEPARATOR: char = '/'; @@ -196,15 +196,15 @@ impl<'a> Metadata<'a> { } pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { - Parameters::iter(self.0) + ParametersView::iter(self.0) } pub fn get(&'a self, k: &str) -> Option<&'a str> { - Parameters::get(self.0, k) + ParametersView::get(self.0, k) } pub fn values(&'a self, k: &str) -> impl DoubleEndedIterator { - Parameters::values(self.0, k) + ParametersView::values(self.0, k) } } @@ -250,7 +250,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( 
self.0.protocol(), self.0.address(), - Parameters::from_iter(Parameters::sort(Parameters::join( + ParametersView::from_iter(ParametersView::sort(ParametersView::join( self.0.metadata().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ))), @@ -269,7 +269,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0, + ParametersView::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0, self.0.config(), )?; @@ -284,7 +284,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().as_str(), k.borrow()).0, + ParametersView::remove(self.0.metadata().as_str(), k.borrow()).0, self.0.config(), )?; @@ -326,15 +326,15 @@ impl<'a> Config<'a> { } pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { - Parameters::iter(self.0) + ParametersView::iter(self.0) } pub fn get(&'a self, k: &str) -> Option<&'a str> { - Parameters::get(self.0, k) + ParametersView::get(self.0, k) } pub fn values(&'a self, k: &str) -> impl DoubleEndedIterator { - Parameters::values(self.0, k) + ParametersView::values(self.0, k) } } @@ -381,7 +381,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::from_iter(Parameters::sort(Parameters::join( + ParametersView::from_iter(ParametersView::sort(ParametersView::join( self.0.config().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ))), @@ -400,7 +400,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0, + ParametersView::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0, )?; self.0.inner = ep.inner; @@ -415,7 +415,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().as_str(), k.borrow()).0, + 
ParametersView::remove(self.0.config().as_str(), k.borrow()).0, )?; self.0.inner = ep.inner; @@ -577,8 +577,8 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_into( - Parameters::sort(Parameters::iter(&s[midx + 1..])), + ParametersView::from_iter_into( + ParametersView::sort(ParametersView::iter(&s[midx + 1..])), &mut inner, ); Ok(EndPoint { inner }) @@ -587,8 +587,8 @@ impl TryFrom for EndPoint { (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - Parameters::from_iter_into( - Parameters::sort(Parameters::iter(&s[cidx + 1..])), + ParametersView::from_iter_into( + ParametersView::sort(ParametersView::iter(&s[cidx + 1..])), &mut inner, ); Ok(EndPoint { inner }) @@ -603,14 +603,14 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_into( - Parameters::sort(Parameters::iter(&s[midx + 1..cidx])), + ParametersView::from_iter_into( + ParametersView::sort(ParametersView::iter(&s[midx + 1..cidx])), &mut inner, ); inner.push(CONFIG_SEPARATOR); - Parameters::from_iter_into( - Parameters::sort(Parameters::iter(&s[cidx + 1..])), + ParametersView::from_iter_into( + ParametersView::sort(ParametersView::iter(&s[cidx + 1..])), &mut inner, ); @@ -655,11 +655,11 @@ impl EndPoint { if rng.gen_bool(0.5) { endpoint.push(METADATA_SEPARATOR); - Parameters::rand(&mut endpoint); + ParametersView::rand(&mut endpoint); } if rng.gen_bool(0.5) { endpoint.push(CONFIG_SEPARATOR); - Parameters::rand(&mut endpoint); + ParametersView::rand(&mut endpoint); } endpoint.parse().unwrap() diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 
9c8eee58a1..47f240e8ef 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -53,12 +53,12 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; +pub mod parameters_view; +pub use parameters_view::*; + pub mod parameters; pub use parameters::*; -pub mod properties; -pub use properties::*; - /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index e4f815feff..b5cfc92e05 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -1,5 +1,5 @@ // -// Copyright (c) 2023 ZettaScale Technology +// Copyright (c) 2022 ZettaScale Technology // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License 2.0 which is available at @@ -11,201 +11,519 @@ // Contributors: // ZettaScale Zenoh Team, // -pub(super) const LIST_SEPARATOR: char = ';'; -pub(super) const FIELD_SEPARATOR: char = '='; -pub(super) const VALUE_SEPARATOR: char = '|'; +use alloc::{ + borrow::Cow, + string::{String, ToString}, +}; +use core::{borrow::Borrow, fmt}; +#[cfg(feature = "std")] +use std::collections::HashMap; -use alloc::{string::String, vec::Vec}; +use super::parameters_view::{ParametersView, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; -fn split_once(s: &str, c: char) -> (&str, &str) { - match s.find(c) { - Some(index) => { - let (l, r) = s.split_at(index); - (l, &r[1..]) - } - None => (s, ""), - } -} - -/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. +/// A map of key/value (String,String) properties. +/// It can be parsed from a String, using `;` or `` as separator between each properties +/// and `=` as separator between a key and its value. Keys and values are trimed. 
+/// +/// Example: +/// ``` +/// use zenoh_protocol::core::Properties; /// -/// `;` is the separator between the key-value `(&str, &str)` elements. +/// let a = "a=1;b=2;c=3|4|5;d=6"; +/// let p = Properties::from(a); /// -/// `=` is the separator between the `&str`-key and `&str`-value +/// // Retrieve values +/// assert!(!p.is_empty()); +/// assert_eq!(p.get("a").unwrap(), "1"); +/// assert_eq!(p.get("b").unwrap(), "2"); +/// assert_eq!(p.get("c").unwrap(), "3|4|5"); +/// assert_eq!(p.get("d").unwrap(), "6"); +/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); /// -/// `|` is the separator between multiple elements of the values. -pub struct Parameters; +/// // Iterate over properties +/// let mut iter = p.iter(); +/// assert_eq!(iter.next().unwrap(), ("a", "1")); +/// assert_eq!(iter.next().unwrap(), ("b", "2")); +/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); +/// assert_eq!(iter.next().unwrap(), ("d", "6")); +/// assert!(iter.next().is_none()); +/// +/// // Create properties from iterators +/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); +/// assert_eq!(p, pi); +/// ``` +#[derive(Clone, PartialEq, Eq, Hash, Default)] +pub struct Parameters<'s>(Cow<'s, str>); + +impl<'s> Parameters<'s> { + /// Returns `true` if properties does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } -impl Parameters { - /// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format. - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) + /// Returns properties as [`str`]. + pub fn as_str(&'s self) -> &'s str { + &self.0 } - /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. - pub fn sort<'s, I>(iter: I) -> impl Iterator + /// Returns `true` if properties contains the specified key. 
+ pub fn contains_key(&self, k: K) -> bool where - I: Iterator, + K: Borrow, { - let mut from = iter.collect::>(); - from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - from.into_iter() + ParametersView::get(self.as_str(), k.borrow()).is_some() } - /// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. - pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator + Clone + /// Returns a reference to the `&str`-value corresponding to the key. + pub fn get(&'s self, k: K) -> Option<&'s str> where - C: Iterator + Clone, - N: Iterator + Clone + 's, + K: Borrow, { - let n = new.clone(); - let current = current - .clone() - .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - current.chain(new) + ParametersView::get(self.as_str(), k.borrow()) } - /// Builds a string from an iterator preserving the order. - #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator where - I: Iterator, + K: Borrow, { - let mut into = String::new(); - Parameters::from_iter_into(iter, &mut into); - into + ParametersView::values(self.as_str(), k.borrow()) } - /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + /// Returns an iterator on the key-value pairs as `(&str, &str)`. + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + ParametersView::iter(self.as_str()) + } + + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. 
+ pub fn insert(&mut self, k: K, v: V) -> Option where - I: Iterator, + K: Borrow, + V: Borrow, { - Parameters::concat_into(iter, into); + let (inner, item) = ParametersView::insert(self.as_str(), k.borrow(), v.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); + item } - /// Get the a `&str`-value for a `&str`-key according to the parameters format. - pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Parameters::iter(s) - .find(|(key, _)| *key == k) - .map(|(_, value)| value) + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + let (inner, item) = ParametersView::remove(self.as_str(), k.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); + item } - /// Get the a `&str`-value iterator for a `&str`-key according to the parameters format. - pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { - match Parameters::get(s, k) { - Some(v) => v.split(VALUE_SEPARATOR), - None => { - let mut i = "".split(VALUE_SEPARATOR); - i.next(); - i - } - } + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Parameters) { + self.extend_from_iter(other.iter()); } - fn _insert<'s, I>( - i: I, - k: &'s str, - v: &'s str, - ) -> (impl Iterator, Option<&'s str>) + /// Extend these properties from an iterator. 
+ pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) where - I: Iterator + Clone, + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, { - let mut iter = i.clone(); - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let inner = ParametersView::from_iter(ParametersView::join( + self.iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + )); + self.0 = Cow::Owned(inner); + } - let current = i.filter(move |x| x.0 != k); - let new = Some((k, v)).into_iter(); - (current.chain(new), item) + /// Convert these properties into owned properties. + pub fn into_owned(self) -> Parameters<'static> { + Parameters(Cow::Owned(self.0.into_owned())) } - /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. - pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { - let (iter, item) = Parameters::_insert(Parameters::iter(s), k, v); - (Parameters::from_iter(iter), item) + /// Returns `true`` if all keys are sorted in alphabetical order. + pub fn is_ordered(&self) -> bool { + ParametersView::is_ordered(self.as_str()) } +} - /// Same as [`Self::insert`] but keys are sorted in alphabetical order. - pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { - let (iter, item) = Parameters::_insert(Parameters::iter(s), k, v); - (Parameters::from_iter(Parameters::sort(iter)), item) +impl<'s> From<&'s str> for Parameters<'s> { + fn from(mut value: &'s str) -> Self { + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + Self(Cow::Borrowed(value)) } +} - /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. 
- pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) { - let mut iter = Parameters::iter(s); - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let iter = iter.filter(|x| x.0 != k); - (Parameters::concat(iter), item) +impl From for Parameters<'_> { + fn from(mut value: String) -> Self { + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); + Self(Cow::Owned(value)) } +} - /// Returns `true` if all keys are sorted in alphabetical order - pub fn is_ordered(s: &str) -> bool { - let mut prev = None; - for (k, _) in Parameters::iter(s) { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } +impl<'s> From> for Parameters<'s> { + fn from(value: Cow<'s, str>) -> Self { + match value { + Cow::Borrowed(s) => Parameters::from(s), + Cow::Owned(s) => Parameters::from(s), } - true } +} - fn concat<'s, I>(iter: I) -> String +impl<'a> From> for Cow<'_, Parameters<'a>> { + fn from(props: Parameters<'a>) -> Self { + Cow::Owned(props) + } +} + +impl<'a> From<&'a Parameters<'a>> for Cow<'a, Parameters<'a>> { + fn from(props: &'a Parameters<'a>) -> Self { + Cow::Borrowed(props) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Parameters<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + let iter = iter.into_iter(); + let inner = ParametersView::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + Self(Cow::Owned(inner)) + } +} + +impl<'s, K, V> FromIterator<&'s (K, V)> for Parameters<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for Parameters<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for 
Parameters<'_> +where + K: Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from_iter(props.iter()) + } +} + +#[cfg(feature = "std")] +impl From<&Parameters<'_>> for HashMap { + fn from(props: &Parameters<'_>) -> Self { + HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s Parameters<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: Parameters) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for Parameters<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for Parameters<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +#[derive(Clone, PartialEq, Eq, Hash, Default)] +pub struct OrderedProperties<'s>(Parameters<'s>); + +impl<'s> OrderedProperties<'s> { + /// Returns `true` if properties does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns properties as [`str`]. + pub fn as_str(&'s self) -> &'s str { + self.0.as_str() + } + + /// Returns `true` if properties contains the specified key. + pub fn contains_key(&self, k: K) -> bool where - I: Iterator, + K: Borrow, { - let mut into = String::new(); - Parameters::concat_into(iter, &mut into); - into + self.0.contains_key(k) } - fn concat_into<'s, I>(iter: I, into: &mut String) + /// Returns a reference to the `&str`-value corresponding to the key. 
+ pub fn get(&'s self, k: K) -> Option<&'s str> where - I: Iterator, + K: Borrow, { - let mut first = true; - for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } + self.0.get(k) } - #[cfg(feature = "test")] - pub fn rand(into: &mut String) { - use rand::{ - distributions::{Alphanumeric, DistString}, - Rng, - }; + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + self.0.values(k) + } - const MIN: usize = 2; - const MAX: usize = 8; + /// Returns an iterator on the key-value pairs as `(&str, &str)`. + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + self.0.iter() + } - let mut rng = rand::thread_rng(); + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + self.0.remove(k) + } - let num = rng.gen_range(MIN..MAX); - for i in 0..num { - if i != 0 { - into.push(LIST_SEPARATOR); - } - let len = rng.gen_range(MIN..MAX); - let key = Alphanumeric.sample_string(&mut rng, len); - into.push_str(key.as_str()); + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let item = self.0.insert(k, v); + self.order(); + item + } - into.push(FIELD_SEPARATOR); + /// Extend these properties with other properties. 
+ pub fn extend(&mut self, other: &Parameters) { + self.extend_from_iter(other.iter()); + } - let len = rng.gen_range(MIN..MAX); - let value = Alphanumeric.sample_string(&mut rng, len); - into.push_str(value.as_str()); + /// Extend these properties from an iterator. + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) + where + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, + { + self.0.extend_from_iter(iter); + self.order(); + } + + /// Convert these properties into owned properties. + pub fn into_owned(self) -> OrderedProperties<'static> { + OrderedProperties(self.0.into_owned()) + } + + fn order(&mut self) { + if !self.0.is_ordered() { + self.0 = Parameters(Cow::Owned(ParametersView::from_iter(ParametersView::sort( + self.iter(), + )))); } } } + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Parameters<'s>) -> Self { + let mut props = Self(value); + props.order(); + props + } +} + +impl<'s> From<&'s str> for OrderedProperties<'s> { + fn from(value: &'s str) -> Self { + Self::from(Parameters::from(value)) + } +} + +impl From for OrderedProperties<'_> { + fn from(value: String) -> Self { + Self::from(Parameters::from(value)) + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Cow<'s, str>) -> Self { + Self::from(Parameters::from(value)) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Parameters::from_iter(iter)) + } +} + +impl<'s, K, V> FromIterator<&'s (K, V)> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Parameters::from_iter(iter)) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for 
OrderedProperties<'_> +where + K: Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From<&OrderedProperties<'_>> for HashMap { + fn from(props: &OrderedProperties<'_>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: OrderedProperties) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_properties() { + assert!(Parameters::from("").0.is_empty()); + + assert_eq!(Parameters::from("p1"), Parameters::from(&[("p1", "")][..])); + + assert_eq!( + Parameters::from("p1=v1"), + Parameters::from(&[("p1", "v1")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2=v2;"), + Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2=v2;|="), + Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2;p3=v3"), + Parameters::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) + ); + + assert_eq!( + Parameters::from("p1=v 1;p 2=v2"), + Parameters::from(&[("p1", "v 1"), ("p 2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=x=y;p2=a==b"), + Parameters::from(&[("p1", "x=y"), ("p2", "a==b")][..]) + ); + + let mut hm: HashMap = HashMap::new(); + 
hm.insert("p1".to_string(), "v1".to_string()); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap<&str, &str> = HashMap::new(); + hm.insert("p1", "v1"); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap, Cow> = HashMap::new(); + hm.insert(Cow::from("p1"), Cow::from("v1")); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + } +} diff --git a/commons/zenoh-protocol/src/core/parameters_view.rs b/commons/zenoh-protocol/src/core/parameters_view.rs new file mode 100644 index 0000000000..adcf0ea0fb --- /dev/null +++ b/commons/zenoh-protocol/src/core/parameters_view.rs @@ -0,0 +1,211 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +pub(super) const LIST_SEPARATOR: char = ';'; +pub(super) const FIELD_SEPARATOR: char = '='; +pub(super) const VALUE_SEPARATOR: char = '|'; + +use alloc::{string::String, vec::Vec}; + +fn split_once(s: &str, c: char) -> (&str, &str) { + match s.find(c) { + Some(index) => { + let (l, r) = s.split_at(index); + (l, &r[1..]) + } + None => (s, ""), + } +} + +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. +/// +/// `;` is the separator between the key-value `(&str, &str)` elements. +/// +/// `=` is the separator between the `&str`-key and `&str`-value +/// +/// `|` is the separator between multiple elements of the values. +pub struct ParametersView; + +impl ParametersView { + /// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format. 
+ pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + s.split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) + } + + /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. + pub fn sort<'s, I>(iter: I) -> impl Iterator + where + I: Iterator, + { + let mut from = iter.collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + from.into_iter() + } + + /// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. + pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator + Clone + where + C: Iterator + Clone, + N: Iterator + Clone + 's, + { + let n = new.clone(); + let current = current + .clone() + .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + current.chain(new) + } + + /// Builds a string from an iterator preserving the order. + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + ParametersView::from_iter_into(iter, &mut into); + into + } + + /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + ParametersView::concat_into(iter, into); + } + + /// Get the a `&str`-value for a `&str`-key according to the parameters format. + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + ParametersView::iter(s) + .find(|(key, _)| *key == k) + .map(|(_, value)| value) + } + + /// Get the a `&str`-value iterator for a `&str`-key according to the parameters format. 
+ pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + match ParametersView::get(s, k) { + Some(v) => v.split(VALUE_SEPARATOR), + None => { + let mut i = "".split(VALUE_SEPARATOR); + i.next(); + i + } + } + } + + fn _insert<'s, I>( + i: I, + k: &'s str, + v: &'s str, + ) -> (impl Iterator, Option<&'s str>) + where + I: Iterator + Clone, + { + let mut iter = i.clone(); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + + let current = i.filter(move |x| x.0 != k); + let new = Some((k, v)).into_iter(); + (current.chain(new), item) + } + + /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. + pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let (iter, item) = ParametersView::_insert(ParametersView::iter(s), k, v); + (ParametersView::from_iter(iter), item) + } + + /// Same as [`Self::insert`] but keys are sorted in alphabetical order. + pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let (iter, item) = ParametersView::_insert(ParametersView::iter(s), k, v); + (ParametersView::from_iter(ParametersView::sort(iter)), item) + } + + /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. 
+ pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) { + let mut iter = ParametersView::iter(s); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let iter = iter.filter(|x| x.0 != k); + (ParametersView::concat(iter), item) + } + + /// Returns `true` if all keys are sorted in alphabetical order + pub fn is_ordered(s: &str) -> bool { + let mut prev = None; + for (k, _) in ParametersView::iter(s) { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } + + fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + ParametersView::concat_into(iter, &mut into); + into + } + + fn concat_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut first = true; + for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + } + + #[cfg(feature = "test")] + pub fn rand(into: &mut String) { + use rand::{ + distributions::{Alphanumeric, DistString}, + Rng, + }; + + const MIN: usize = 2; + const MAX: usize = 8; + + let mut rng = rand::thread_rng(); + + let num = rng.gen_range(MIN..MAX); + for i in 0..num { + if i != 0 { + into.push(LIST_SEPARATOR); + } + let len = rng.gen_range(MIN..MAX); + let key = Alphanumeric.sample_string(&mut rng, len); + into.push_str(key.as_str()); + + into.push(FIELD_SEPARATOR); + + let len = rng.gen_range(MIN..MAX); + let value = Alphanumeric.sample_string(&mut rng, len); + into.push_str(value.as_str()); + } + } +} diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs deleted file mode 100644 index 5264288448..0000000000 --- a/commons/zenoh-protocol/src/core/properties.rs +++ /dev/null @@ -1,517 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made 
available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use alloc::{ - borrow::Cow, - string::{String, ToString}, -}; -use core::{borrow::Borrow, fmt}; -#[cfg(feature = "std")] -use std::collections::HashMap; - -use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; - -/// A map of key/value (String,String) properties. -/// It can be parsed from a String, using `;` or `` as separator between each properties -/// and `=` as separator between a key and its value. Keys and values are trimed. -/// -/// Example: -/// ``` -/// use zenoh_protocol::core::Properties; -/// -/// let a = "a=1;b=2;c=3|4|5;d=6"; -/// let p = Properties::from(a); -/// -/// // Retrieve values -/// assert!(!p.is_empty()); -/// assert_eq!(p.get("a").unwrap(), "1"); -/// assert_eq!(p.get("b").unwrap(), "2"); -/// assert_eq!(p.get("c").unwrap(), "3|4|5"); -/// assert_eq!(p.get("d").unwrap(), "6"); -/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); -/// -/// // Iterate over properties -/// let mut iter = p.iter(); -/// assert_eq!(iter.next().unwrap(), ("a", "1")); -/// assert_eq!(iter.next().unwrap(), ("b", "2")); -/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); -/// assert_eq!(iter.next().unwrap(), ("d", "6")); -/// assert!(iter.next().is_none()); -/// -/// // Create properties from iterators -/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); -/// assert_eq!(p, pi); -/// ``` -#[derive(Clone, PartialEq, Eq, Hash, Default)] -pub struct Properties<'s>(Cow<'s, str>); - -impl<'s> Properties<'s> { - /// Returns `true` if properties does not contain anything. 
- pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns properties as [`str`]. - pub fn as_str(&'s self) -> &'s str { - &self.0 - } - - /// Returns `true` if properties contains the specified key. - pub fn contains_key(&self, k: K) -> bool - where - K: Borrow, - { - Parameters::get(self.as_str(), k.borrow()).is_some() - } - - /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&'s self, k: K) -> Option<&'s str> - where - K: Borrow, - { - Parameters::get(self.as_str(), k.borrow()) - } - - /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&'s self, k: K) -> impl DoubleEndedIterator - where - K: Borrow, - { - Parameters::values(self.as_str(), k.borrow()) - } - - /// Returns an iterator on the key-value pairs as `(&str, &str)`. - pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - Parameters::iter(self.as_str()) - } - - /// Inserts a key-value pair into the map. - /// If the map did not have this key present, [`None`]` is returned. - /// If the map did have this key present, the value is updated, and the old value is returned. - pub fn insert(&mut self, k: K, v: V) -> Option - where - K: Borrow, - V: Borrow, - { - let (inner, item) = Parameters::insert(self.as_str(), k.borrow(), v.borrow()); - let item = item.map(|i| i.to_string()); - self.0 = Cow::Owned(inner); - item - } - - /// Removes a key from the map, returning the value at the key if the key was previously in the properties. - pub fn remove(&mut self, k: K) -> Option - where - K: Borrow, - { - let (inner, item) = Parameters::remove(self.as_str(), k.borrow()); - let item = item.map(|i| i.to_string()); - self.0 = Cow::Owned(inner); - item - } - - /// Extend these properties with other properties. - pub fn extend(&mut self, other: &Properties) { - self.extend_from_iter(other.iter()); - } - - /// Extend these properties from an iterator. 
- pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) - where - I: Iterator + Clone, - K: Borrow + 'e + ?Sized, - V: Borrow + 'e + ?Sized, - { - let inner = Parameters::from_iter(Parameters::join( - self.iter(), - iter.map(|(k, v)| (k.borrow(), v.borrow())), - )); - self.0 = Cow::Owned(inner); - } - - /// Convert these properties into owned properties. - pub fn into_owned(self) -> Properties<'static> { - Properties(Cow::Owned(self.0.into_owned())) - } - - /// Returns `true`` if all keys are sorted in alphabetical order. - pub fn is_ordered(&self) -> bool { - Parameters::is_ordered(self.as_str()) - } -} - -impl<'s> From<&'s str> for Properties<'s> { - fn from(mut value: &'s str) -> Self { - value = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - Self(Cow::Borrowed(value)) - } -} - -impl From for Properties<'_> { - fn from(mut value: String) -> Self { - let s = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - value.truncate(s.len()); - Self(Cow::Owned(value)) - } -} - -impl<'s> From> for Properties<'s> { - fn from(value: Cow<'s, str>) -> Self { - match value { - Cow::Borrowed(s) => Properties::from(s), - Cow::Owned(s) => Properties::from(s), - } - } -} - -impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> -where - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, -{ - fn from_iter>(iter: T) -> Self { - let iter = iter.into_iter(); - let inner = Parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - Self(Cow::Owned(inner)) - } -} - -impl<'s, K, V> FromIterator<&'s (K, V)> for Properties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from_iter>(iter: T) -> Self { - Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) - } -} - -impl<'s, K, V> From<&'s [(K, V)]> for Properties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from(value: &'s [(K, V)]) -> Self { - 
Self::from_iter(value.iter()) - } -} - -#[cfg(feature = "std")] -impl From> for Properties<'_> -where - K: Borrow, - V: Borrow, -{ - fn from(map: HashMap) -> Self { - Self::from_iter(map.iter()) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s Properties<'s>> for HashMap<&'s str, &'s str> { - fn from(props: &'s Properties<'s>) -> Self { - HashMap::from_iter(props.iter()) - } -} - -#[cfg(feature = "std")] -impl From<&Properties<'_>> for HashMap { - fn from(props: &Properties<'_>) -> Self { - HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s Properties<'s>> for HashMap, Cow<'s, str>> { - fn from(props: &'s Properties<'s>) -> Self { - HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) - } -} - -#[cfg(feature = "std")] -impl From> for HashMap { - fn from(props: Properties) -> Self { - HashMap::from(&props) - } -} - -impl fmt::Display for Properties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl fmt::Debug for Properties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -#[derive(Clone, PartialEq, Eq, Hash, Default)] -pub struct OrderedProperties<'s>(Properties<'s>); - -impl<'s> OrderedProperties<'s> { - /// Returns `true` if properties does not contain anything. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns properties as [`str`]. - pub fn as_str(&'s self) -> &'s str { - self.0.as_str() - } - - /// Returns `true` if properties contains the specified key. - pub fn contains_key(&self, k: K) -> bool - where - K: Borrow, - { - self.0.contains_key(k) - } - - /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&'s self, k: K) -> Option<&'s str> - where - K: Borrow, - { - self.0.get(k) - } - - /// Returns an iterator to the `&str`-values corresponding to the key. 
- pub fn values(&'s self, k: K) -> impl DoubleEndedIterator - where - K: Borrow, - { - self.0.values(k) - } - - /// Returns an iterator on the key-value pairs as `(&str, &str)`. - pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - self.0.iter() - } - - /// Removes a key from the map, returning the value at the key if the key was previously in the properties. - pub fn remove(&mut self, k: K) -> Option - where - K: Borrow, - { - self.0.remove(k) - } - - /// Inserts a key-value pair into the map. - /// If the map did not have this key present, [`None`]` is returned. - /// If the map did have this key present, the value is updated, and the old value is returned. - pub fn insert(&mut self, k: K, v: V) -> Option - where - K: Borrow, - V: Borrow, - { - let item = self.0.insert(k, v); - self.order(); - item - } - - /// Extend these properties with other properties. - pub fn extend(&mut self, other: &Properties) { - self.extend_from_iter(other.iter()); - } - - /// Extend these properties from an iterator. - pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) - where - I: Iterator + Clone, - K: Borrow + 'e + ?Sized, - V: Borrow + 'e + ?Sized, - { - self.0.extend_from_iter(iter); - self.order(); - } - - /// Convert these properties into owned properties. 
- pub fn into_owned(self) -> OrderedProperties<'static> { - OrderedProperties(self.0.into_owned()) - } - - fn order(&mut self) { - if !self.0.is_ordered() { - self.0 = Properties(Cow::Owned(Parameters::from_iter(Parameters::sort( - self.iter(), - )))); - } - } -} - -impl<'s> From> for OrderedProperties<'s> { - fn from(value: Properties<'s>) -> Self { - let mut props = Self(value); - props.order(); - props - } -} - -impl<'s> From<&'s str> for OrderedProperties<'s> { - fn from(value: &'s str) -> Self { - Self::from(Properties::from(value)) - } -} - -impl From for OrderedProperties<'_> { - fn from(value: String) -> Self { - Self::from(Properties::from(value)) - } -} - -impl<'s> From> for OrderedProperties<'s> { - fn from(value: Cow<'s, str>) -> Self { - Self::from(Properties::from(value)) - } -} - -impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> -where - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, -{ - fn from_iter>(iter: T) -> Self { - Self::from(Properties::from_iter(iter)) - } -} - -impl<'s, K, V> FromIterator<&'s (K, V)> for OrderedProperties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from_iter>(iter: T) -> Self { - Self::from(Properties::from_iter(iter)) - } -} - -impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from(value: &'s [(K, V)]) -> Self { - Self::from_iter(value.iter()) - } -} - -#[cfg(feature = "std")] -impl From> for OrderedProperties<'_> -where - K: Borrow, - V: Borrow, -{ - fn from(map: HashMap) -> Self { - Self::from_iter(map.iter()) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { - fn from(props: &'s OrderedProperties<'s>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl From<&OrderedProperties<'_>> for HashMap { - fn from(props: &OrderedProperties<'_>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s 
OrderedProperties<'s>> for HashMap, Cow<'s, str>> { - fn from(props: &'s OrderedProperties<'s>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl From> for HashMap { - fn from(props: OrderedProperties) -> Self { - HashMap::from(&props) - } -} - -impl fmt::Display for OrderedProperties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl fmt::Debug for OrderedProperties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_properties() { - assert!(Properties::from("").0.is_empty()); - - assert_eq!(Properties::from("p1"), Properties::from(&[("p1", "")][..])); - - assert_eq!( - Properties::from("p1=v1"), - Properties::from(&[("p1", "v1")][..]) - ); - - assert_eq!( - Properties::from("p1=v1;p2=v2;"), - Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Properties::from("p1=v1;p2=v2;|="), - Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Properties::from("p1=v1;p2;p3=v3"), - Properties::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) - ); - - assert_eq!( - Properties::from("p1=v 1;p 2=v2"), - Properties::from(&[("p1", "v 1"), ("p 2", "v2")][..]) - ); - - assert_eq!( - Properties::from("p1=x=y;p2=a==b"), - Properties::from(&[("p1", "x=y"), ("p2", "a==b")][..]) - ); - - let mut hm: HashMap = HashMap::new(); - hm.insert("p1".to_string(), "v1".to_string()); - assert_eq!(Properties::from(hm), Properties::from("p1=v1")); - - let mut hm: HashMap<&str, &str> = HashMap::new(); - hm.insert("p1", "v1"); - assert_eq!(Properties::from(hm), Properties::from("p1=v1")); - - let mut hm: HashMap, Cow> = HashMap::new(); - hm.insert(Cow::from("p1"), Cow::from("v1")); - assert_eq!(Properties::from(hm), Properties::from("p1=v1")); - } -} diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 942ec0e34e..8766d54b95 100644 --- 
a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -17,7 +17,7 @@ use clap::Parser; use zenoh::{ prelude::*, query::QueryTarget, - selector::Selector, + selector::KeyExpr, shm::{ zshm, BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, POSIX_PROTOCOL_ID, @@ -113,7 +113,7 @@ enum Qt { struct Args { #[arg(short, long, default_value = "demo/example/**")] /// The selection of resources to query - selector: Selector<'static>, + selector: KeyExpr<'static>, /// The value to publish. value: Option, #[arg(short, long, default_value = "BEST_MATCHING")] @@ -128,7 +128,7 @@ struct Args { fn parse_args() -> ( Config, - Selector<'static>, + KeyExpr<'static>, Option, QueryTarget, Duration, diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 83a2dee66d..f812c78094 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -64,7 +64,7 @@ async fn main() { let query = query.unwrap(); println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { - if query.selector().key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { + if query.key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { query.reply(sample.key_expr().clone(), sample.payload().clone()).await.unwrap(); } } diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs index 1eb8f94380..059734f9c9 100644 --- a/io/zenoh-links/zenoh-link-quic/src/utils.rs +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -31,7 +31,7 @@ use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::ConfigurationInspector; use zenoh_protocol::core::{ endpoint::{Address, Config}, - Parameters, + ParametersView, }; use zenoh_result::{bail, zerror, ZError, ZResult}; @@ -140,7 +140,7 @@ impl ConfigurationInspector for TlsConfigurator { }; } - Ok(Parameters::from_iter(ps.drain(..))) + 
Ok(ParametersView::from_iter(ps.drain(..))) } } diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index b646c6e80d..1acaa05454 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -33,7 +33,7 @@ use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; use zenoh_protocol::core::{ endpoint::{Address, Config}, - Parameters, + ParametersView, }; use zenoh_result::{bail, zerror, ZError, ZResult}; @@ -142,7 +142,7 @@ impl ConfigurationInspector for TlsConfigurator { }; } - Ok(Parameters::from_iter(ps.drain(..))) + Ok(ParametersView::from_iter(ps.drain(..))) } } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index 61c891da33..6d11878409 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -24,7 +24,7 @@ pub use unicast::*; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{Locator, Parameters}; +use zenoh_protocol::core::{Locator, ParametersView}; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; @@ -56,7 +56,7 @@ impl ConfigurationInspector for UnixPipeConfigurator { properties.push((config::FILE_ACCESS_MASK, &file_access_mask_)); } - let s = Parameters::from_iter(properties.drain(..)); + let s = ParametersView::from_iter(properties.drain(..)); Ok(s) } diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 3c04cf6425..9e7ff1ea35 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -22,7 +22,7 @@ use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; use zenoh_protocol::{ - core::{Parameters, 
ZenohId}, + core::{ParametersView, ZenohId}, transport::close, }; use zenoh_result::{bail, zerror, ZResult}; @@ -258,7 +258,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend_from_iter(Parameters::iter(config))?; + .extend_from_iter(ParametersView::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index f42002b0d3..89ecc1cb1c 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -30,7 +30,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::{Parameters, ZenohId}, + core::{ParametersView, ZenohId}, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; @@ -387,7 +387,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend_from_iter(Parameters::iter(config))?; + .extend_from_iter(ParametersView::iter(config))?; }; manager.new_listener(endpoint).await } @@ -698,7 +698,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend_from_iter(Parameters::iter(config))?; + .extend_from_iter(ParametersView::iter(config))?; }; // Create a new link associated by calling the Link Manager diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index d6c58bed0b..4c55b415af 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -183,7 +183,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { let query = query.unwrap(); info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { - if query.selector().key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { 
+ if query.key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { query.reply_sample(sample.clone()).await.unwrap(); } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 4f0ca3f67d..072a060d1a 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::{ key_expr::{keyexpr, KeyExpr}, query::{QueryConsolidation, Reply}, sample::{Sample, SampleKind, ValueBuilderTrait}, - selector::{Selector, TIME_RANGE_KEY}, + selector::{Parameters, Selector, TIME_RANGE_KEY}, session::{Session, SessionDeclarations}, value::Value, }; @@ -252,16 +252,13 @@ impl PluginControl for RunningPlugin {} impl RunningPluginTrait for RunningPlugin { fn adminspace_getter<'a>( &'a self, - selector: &'a Selector<'a>, + key_expr: &'a KeyExpr<'a>, plugin_status_key: &str, ) -> ZResult> { let mut responses = Vec::new(); let mut key = String::from(plugin_status_key); with_extended_string(&mut key, &["/version"], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(selector.key_expr()) - { + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { responses.push(zenoh::internal::plugins::Response::new( key.clone(), GIT_VERSION.into(), @@ -271,7 +268,7 @@ impl RunningPluginTrait for RunningPlugin { with_extended_string(&mut key, &["/port"], |port_key| { if keyexpr::new(port_key.as_str()) .unwrap() - .intersects(selector.key_expr()) + .intersects(key_expr) { responses.push(zenoh::internal::plugins::Response::new( port_key.clone(), @@ -385,18 +382,18 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result( &'a self, - selector: &'a Selector<'a>, + key_expr: &'a KeyExpr<'a>, plugin_status_key: &str, ) -> ZResult> { let mut responses = Vec::new(); let mut key = String::from(plugin_status_key); // TODO: to be removed when "__version__" is implemented in admoin space with_extended_string(&mut key, &["/version"], |key| { - if keyexpr::new(key.as_str()) - 
.unwrap() - .intersects(selector.key_expr()) - { + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { responses.push(Response::new( key.clone(), StoragesPlugin::PLUGIN_VERSION.into(), @@ -327,17 +323,11 @@ impl RunningPluginTrait for StorageRuntime { for plugin in guard.plugins_manager.started_plugins_iter() { with_extended_string(key, &[plugin.id()], |key| { with_extended_string(key, &["/__path__"], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(selector.key_expr()) - { + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { responses.push(Response::new(key.clone(), plugin.path().into())) } }); - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(selector.key_expr()) - { + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { responses.push(Response::new( key.clone(), plugin.instance().get_admin_status(), @@ -350,10 +340,7 @@ impl RunningPluginTrait for StorageRuntime { for storages in guard.storages.values() { for (storage, handle) in storages { with_extended_string(key, &[storage], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(selector.key_expr()) - { + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { if let Ok(value) = task::block_on(async { let (tx, rx) = async_std::channel::bounded(1); let _ = handle.send(StorageMessage::GetStatus(tx)); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 66233d2535..30a40abe30 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::{ use async_std::sync::Arc; use zenoh::{ - bytes::StringOrBase64, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Selector, - time::Timestamp, value::Value, Session, + bytes::StringOrBase64, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, + selector::Parameters, 
time::Timestamp, value::Value, Session, }; use super::{digest::*, Snapshotter}; @@ -86,7 +86,7 @@ impl AlignQueryable { } }; tracing::trace!("[ALIGN QUERYABLE] Received Query '{}'", query.selector()); - let diff_required = self.parse_selector(query.selector()); + let diff_required = self.parse_parameters(query.parameters()); tracing::trace!( "[ALIGN QUERYABLE] Parsed selector diff_required:{:?}", diff_required @@ -187,15 +187,14 @@ impl AlignQueryable { } } - fn parse_selector(&self, selector: Selector) -> Option { - let properties = selector.parameters(); // note: this is a hashmap - tracing::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); - if properties.contains_key(super::ERA) { + fn parse_parameters(&self, parameters: &Parameters) -> Option { + tracing::trace!("[ALIGN QUERYABLE] Properties are: {:?}", parameters); + if parameters.contains_key(super::ERA) { Some(AlignComponent::Era( - EraType::from_str(properties.get(super::ERA).unwrap()).unwrap(), + EraType::from_str(parameters.get(super::ERA).unwrap()).unwrap(), )) - } else if properties.contains_key(super::INTERVALS) { - let mut intervals = properties.get(super::INTERVALS).unwrap().to_string(); + } else if parameters.contains_key(super::INTERVALS) { + let mut intervals = parameters.get(super::INTERVALS).unwrap().to_string(); intervals.remove(0); intervals.pop(); Some(AlignComponent::Intervals( @@ -204,8 +203,8 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.contains_key(super::SUBINTERVALS) { - let mut subintervals = properties.get(super::SUBINTERVALS).unwrap().to_string(); + } else if parameters.contains_key(super::SUBINTERVALS) { + let mut subintervals = parameters.get(super::SUBINTERVALS).unwrap().to_string(); subintervals.remove(0); subintervals.pop(); Some(AlignComponent::Subintervals( @@ -214,8 +213,8 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.contains_key(super::CONTENTS) { - let 
contents = serde_json::from_str(properties.get(super::CONTENTS).unwrap()).unwrap(); + } else if parameters.contains_key(super::CONTENTS) { + let contents = serde_json::from_str(parameters.get(super::CONTENTS).unwrap()).unwrap(); Some(AlignComponent::Contents(contents)) } else { None diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index eaecee5246..c20f074e1b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -323,7 +323,7 @@ impl Aligner { async fn perform_query(&self, from: &str, properties: String) -> (Vec, bool) { let mut no_err = true; - let selector = Selector::new( + let selector = Selector::owned( KeyExpr::from(&self.digest_key).join(&from).unwrap(), properties, ); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 4087fb3682..ed7f533147 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -647,7 +647,7 @@ impl StorageService { // with `_time=[..]` to get historical data (in case of time-series) let replies = match self .session - .get(Selector::new(&self.key_expr, "_time=[..]")) + .get(Selector::owned(&self.key_expr, "_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) .await diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index dc01367666..6f7548d97c 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -26,6 +26,7 @@ use zenoh::{ query::Query, queryable::Queryable, sample::{Locality, Sample}, + selector::PredefinedParameters, session::{SessionDeclarations, SessionRef}, subscriber::FlumeSubscriber, }; @@ -212,8 +213,8 @@ impl<'a> PublicationCache<'a> { // on query, reply with cache content query = 
quer_recv.recv_async() => { if let Ok(query) = query { - if !query.selector().key_expr().as_str().contains('*') { - if let Some(queue) = cache.get(query.selector().key_expr().as_keyexpr()) { + if !query.key_expr().as_str().contains('*') { + if let Some(queue) = cache.get(query.key_expr().as_keyexpr()) { for sample in queue { if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ @@ -227,7 +228,7 @@ impl<'a> PublicationCache<'a> { } } else { for (key_expr, queue) in cache.iter() { - if query.selector().key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { + if query.key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 54f3ff0224..2adf4d43ae 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -28,7 +28,6 @@ use zenoh::{ prelude::Wait, query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, - selector::Selector, session::{SessionDeclarations, SessionRef}, subscriber::{Reliability, Subscriber}, time::{new_timestamp, Timestamp}, @@ -44,7 +43,7 @@ pub struct QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> { pub(crate) key_space: KeySpace, pub(crate) reliability: Reliability, pub(crate) origin: Locality, - pub(crate) query_selector: Option>>, + pub(crate) query_selector: Option>>, pub(crate) query_target: QueryTarget, pub(crate) query_consolidation: QueryConsolidation, pub(crate) query_accept_replies: ReplyKeyExpr, @@ -179,8 +178,8 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle #[inline] pub 
fn query_selector(mut self, query_selector: IntoSelector) -> Self where - IntoSelector: TryInto>, - >>::Error: Into, + IntoSelector: TryInto>, + >>::Error: Into, { self.query_selector = Some(query_selector.try_into().map_err(Into::into)); self diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index c4bcf6ae5e..920be0bbaa 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -26,7 +26,7 @@ use zenoh_buffers::{ ZBuf, ZBufReader, ZBufWriter, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; +use zenoh_protocol::{core::Parameters, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::{ @@ -1145,70 +1145,70 @@ impl TryFrom<&mut ZBytes> for bool { // - Zenoh advanced types encoders/decoders // Properties -impl Serialize> for ZSerde { +impl Serialize> for ZSerde { type Output = ZBytes; - fn serialize(self, t: Properties<'_>) -> Self::Output { + fn serialize(self, t: Parameters<'_>) -> Self::Output { Self.serialize(t.as_str()) } } -impl From> for ZBytes { - fn from(t: Properties<'_>) -> Self { +impl From> for ZBytes { + fn from(t: Parameters<'_>) -> Self { ZSerde.serialize(t) } } -impl Serialize<&Properties<'_>> for ZSerde { +impl Serialize<&Parameters<'_>> for ZSerde { type Output = ZBytes; - fn serialize(self, t: &Properties<'_>) -> Self::Output { + fn serialize(self, t: &Parameters<'_>) -> Self::Output { Self.serialize(t.as_str()) } } -impl<'s> From<&'s Properties<'s>> for ZBytes { - fn from(t: &'s Properties<'s>) -> Self { +impl<'s> From<&'s Parameters<'s>> for ZBytes { + fn from(t: &'s Parameters<'s>) -> Self { ZSerde.serialize(t) } } -impl Serialize<&mut Properties<'_>> for ZSerde { +impl Serialize<&mut Parameters<'_>> for ZSerde { type Output = ZBytes; - fn serialize(self, t: &mut Properties<'_>) -> Self::Output { + fn serialize(self, t: &mut Parameters<'_>) -> Self::Output { Self.serialize(t.as_str()) } } 
-impl<'s> From<&'s mut Properties<'s>> for ZBytes { - fn from(t: &'s mut Properties<'s>) -> Self { +impl<'s> From<&'s mut Parameters<'s>> for ZBytes { + fn from(t: &'s mut Parameters<'s>) -> Self { ZSerde.serialize(&*t) } } -impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { +impl<'s> Deserialize<'s, Parameters<'s>> for ZSerde { type Input = &'s ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; - Ok(Properties::from(s)) + Ok(Parameters::from(s)) } } -impl TryFrom for Properties<'static> { +impl TryFrom for Parameters<'static> { type Error = ZDeserializeError; fn try_from(v: ZBytes) -> Result { let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; - Ok(Properties::from(s.into_owned())) + Ok(Parameters::from(s.into_owned())) } } -impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { +impl<'s> TryFrom<&'s ZBytes> for Parameters<'s> { type Error = ZDeserializeError; fn try_from(value: &'s ZBytes) -> Result { @@ -1216,7 +1216,7 @@ impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { } } -impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { +impl<'s> TryFrom<&'s mut ZBytes> for Parameters<'s> { type Error = ZDeserializeError; fn try_from(value: &'s mut ZBytes) -> Result { @@ -1877,7 +1877,7 @@ mod tests { use zenoh_buffers::{ZBuf, ZSlice}; #[cfg(feature = "shared-memory")] use zenoh_core::Wait; - use zenoh_protocol::core::Properties; + use zenoh_protocol::core::Parameters; #[cfg(feature = "shared-memory")] use zenoh_shm::api::{ buffer::zshm::{zshm, ZShm}, @@ -2018,9 +2018,9 @@ mod tests { serialize_deserialize!(&zshm, immutable_shm_buf); } - // Properties - serialize_deserialize!(Properties, Properties::from("")); - serialize_deserialize!(Properties, Properties::from("a=1;b=2;c3")); + // Parameters + serialize_deserialize!(Parameters, Parameters::from("")); + 
serialize_deserialize!(Parameters, Parameters::from("a=1;b=2;c3")); // Tuple serialize_deserialize!((usize, usize), (0, 1)); diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 8215fe5278..18b3e2ca0c 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -358,6 +358,7 @@ impl<'a> From> for String { } } } + impl<'a> TryFrom for KeyExpr<'a> { type Error = zenoh_result::Error; fn try_from(value: String) -> Result { diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5011b99a7e..f100a7469c 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -22,7 +22,10 @@ use std::{ use zenoh_config::unwrap_or_default; use zenoh_core::{Resolvable, Resolve, Result as ZResult, Wait}; use zenoh_keyexpr::keyexpr; -use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; +use zenoh_protocol::{ + core::Parameters, + network::{declare::subscriber::ext::SubscriberInfo, request}, +}; use super::{ handlers::{locked, DefaultHandler, IntoHandler}, @@ -743,6 +746,7 @@ where self.session .query( &self.key_expr?.into(), + &Parameters::default(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), QueryTarget::DEFAULT, QueryConsolidation::DEFAULT, diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index b7f1954a6b..63519eac2b 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -21,7 +21,7 @@ use zenoh_plugin_trait::{ use zenoh_protocol::core::key_expr::keyexpr; use zenoh_result::ZResult; -use super::selector::Selector; +use super::key_expr::KeyExpr; use crate::net::runtime::Runtime; zconfigurable! { @@ -93,9 +93,9 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// Function called on any query on admin space that matches this plugin's sub-part of the admin space. /// Thus the plugin can reply its contribution to the global admin space of this zenohd. 
/// Parameters: - /// * `selector`: the full selector of the query (usually only key_expr part is used). This selector is + /// * `key_expr`: the key_expr selector of the query. This key_expr is /// exactly the same as it was requested by user, for example "@/router/ROUTER_ID/plugins/PLUGIN_NAME/some/plugin/info" or "@/router/*/plugins/*/foo/bar". - /// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the selector matches the `plugin_status_key` + /// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the key_expr matches the `plugin_status_key` /// * `plugin_status_key`: the actual path to plugin's status in the admin space. For example "@/router/ROUTER_ID/plugins/PLUGIN_NAME" /// Returns value: /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" @@ -113,7 +113,7 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// fn adminspace_getter<'a>( &'a self, - _selector: &'a Selector<'a>, + _key_expr: &'a KeyExpr<'a>, _plugin_status_key: &str, ) -> ZResult> { Ok(Vec::new()) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 562069566b..ba925876c9 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -18,9 +18,11 @@ use std::{ time::Duration, }; +#[zenoh_macros::unstable] +use std::borrow::Cow; use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::{CongestionControl, ZenohId}; +use zenoh_protocol::core::{CongestionControl, Parameters, ZenohId}; use zenoh_result::ZResult; #[zenoh_macros::unstable] @@ -117,13 +119,20 @@ impl From for Result { pub(crate) struct QueryState { pub(crate) nb_final: usize, - pub(crate) selector: Selector<'static>, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) parameters: Parameters<'static>, pub(crate) scope: Option>, pub(crate) reception_mode: ConsolidationMode, pub(crate) replies: Option>, pub(crate) 
callback: Callback<'static, Reply>, } +impl QueryState { + pub(crate) fn selector(&self) -> Selector { + Selector::borrowed(&self.key_expr, &self.parameters) + } +} + /// A builder for initializing a `query`. /// /// # Examples @@ -407,12 +416,27 @@ impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { #[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { - selector: self.selector.map(|mut s| { - if accept == ReplyKeyExpr::Any { - s.parameters_mut().insert(_REPLY_KEY_EXPR_ANY_SEL_PARAM, ""); - } - s - }), + selector: self.selector.map( + |Selector { + key_expr, + parameters, + }| { + if accept == ReplyKeyExpr::Any { + let mut parameters = parameters.into_owned(); + parameters.insert(_REPLY_KEY_EXPR_ANY_SEL_PARAM, ""); + let parameters = Cow::Owned(parameters); + Selector { + key_expr, + parameters, + } + } else { + Selector { + key_expr, + parameters, + } + } + }, + ), ..self } } @@ -445,10 +469,14 @@ where { fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); - + let Selector { + key_expr, + parameters, + } = self.selector?; self.session .query( - &self.selector?, + &key_expr, + ¶meters, &self.scope?, self.target, self.consolidation, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index bb41a37c2f..f113dfc11e 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -21,7 +21,7 @@ use std::{ use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ - core::{CongestionControl, EntityId, WireExpr, ZenohId}, + core::{CongestionControl, EntityId, Parameters, WireExpr, ZenohId}, network::{response, Mapping, RequestId, Response, ResponseFinal}, zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; @@ -43,7 +43,7 @@ use super::{ key_expr::KeyExpr, publisher::Priority, sample::{Locality, QoSBuilder, Sample, SampleKind}, - selector::{Parameters, Selector}, + selector::Selector, session::{SessionRef, Undeclarable}, value::Value, 
Id, @@ -81,10 +81,7 @@ impl Query { /// The full [`Selector`] of this Query. #[inline(always)] pub fn selector(&self) -> Selector<'_> { - Selector { - key_expr: self.inner.key_expr.clone(), - parameters: self.inner.parameters.clone(), - } + Selector::borrowed(&self.inner.key_expr, &self.inner.parameters) } /// The key selector part of this Query. diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 7477ea65e9..6f6eeaf8ff 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -13,16 +13,11 @@ // //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use std::{ - collections::HashMap, - convert::TryFrom, - ops::{Deref, DerefMut}, - str::FromStr, -}; +use std::{borrow::Cow, convert::TryFrom, str::FromStr}; use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, - Properties, + Parameters, }; #[cfg(feature = "unstable")] use zenoh_result::ZResult; @@ -60,7 +55,7 @@ use super::{key_expr::KeyExpr, queryable::Query}; /// queryables. /// /// Here are the currently standardized parameters for Zenoh (check the specification page for the exhaustive list): -/// - `_time`: used to express interest in only values dated within a certain time range, values for +/// - **`[unstable]`** `_time`: used to express interest in only values dated within a certain time range, values for /// this parameter must be readable by the [Zenoh Time DSL](zenoh_util::time_range::TimeRange) for the value to be considered valid. /// - **`[unstable]`** `_anyke`: used in queries to express interest in replies coming from any key expression. By default, only replies /// whose key expression match query's key expression are accepted. `_anyke` disables the query-reply key expression matching check. @@ -68,146 +63,76 @@ use super::{key_expr::KeyExpr, queryable::Query}; #[derive(Clone, PartialEq, Eq)] pub struct Selector<'a> { /// The part of this selector identifying which keys should be part of the selection. 
- pub(crate) key_expr: KeyExpr<'a>, + pub key_expr: Cow<'a, KeyExpr<'a>>, /// the part of this selector identifying which values should be part of the selection. - pub(crate) parameters: Parameters<'a>, + pub parameters: Cow<'a, Parameters<'a>>, } #[zenoh_macros::unstable] pub const TIME_RANGE_KEY: &str = "_time"; + impl<'a> Selector<'a> { - /// Builds a new selector - pub fn new(key_expr: K, parameters: P) -> Self + /// Builds a new selector which owns keyexpr and parameters + pub fn owned(key_expr: K, parameters: P) -> Self where K: Into>, P: Into>, { Self { - key_expr: key_expr.into(), - parameters: parameters.into(), + key_expr: Cow::Owned(key_expr.into()), + parameters: Cow::Owned(parameters.into()), } } - - /// Gets the key-expression. - pub fn key_expr(&'a self) -> &KeyExpr<'a> { - &self.key_expr - } - - /// Gets a reference to selector's [`Parameters`]. - pub fn parameters(&self) -> &Parameters<'a> { - &self.parameters - } - - /// Gets a mutable reference to selector's [`Parameters`]. - pub fn parameters_mut(&mut self) -> &mut Parameters<'a> { - &mut self.parameters - } - - /// Sets the parameters of this selector. This operation completly overwrites existing [`Parameters`]. - #[inline(always)] - pub fn set_parameters

(&mut self, parameters: P) - where - P: Into>, - { - self.parameters = parameters.into(); - } - - /// Create an owned version of this selector with `'static` lifetime. - pub fn into_owned(self) -> Selector<'static> { - Selector { - key_expr: self.key_expr.into_owned(), - parameters: self.parameters.into_owned(), + /// Build a new selector holding references to keyexpr and parameters + /// Useful for printing pair of keyexpr and parameters in url-like format + pub fn borrowed(key_expr: &'a KeyExpr<'a>, parameters: &'a Parameters<'a>) -> Self { + Self { + key_expr: Cow::Borrowed(key_expr), + parameters: Cow::Borrowed(parameters), } } - - /// Returns this selectors components as a tuple. - pub fn split(self) -> (KeyExpr<'a>, Parameters<'a>) { - (self.key_expr, self.parameters) - } -} - -/// A wrapper type to help decode zenoh selector parameters. -/// -/// Most methods will return an Error if duplicates of a same parameter are found, to avoid HTTP Parameter Pollution like vulnerabilities. -#[repr(transparent)] -#[derive(Clone, PartialEq, Eq)] -pub struct Parameters<'a>(Properties<'a>); - -impl<'a> Deref for Parameters<'a> { - type Target = Properties<'a>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<'a> DerefMut for Parameters<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl std::fmt::Display for Parameters<'_> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl std::fmt::Debug for Parameters<'_> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) - } -} - -impl<'a, T> From for Parameters<'a> -where - T: Into>, -{ - fn from(value: T) -> Self { - Parameters(value.into()) - } } -impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { - fn from(props: &'s Parameters<'s>) -> Self { - HashMap::from(&props.0) +impl<'a> From> for (KeyExpr<'a>, Parameters<'a>) { + fn from(selector: Selector<'a>) -> Self { + 
( + selector.key_expr.into_owned(), + selector.parameters.into_owned(), + ) } } -impl From<&Parameters<'_>> for HashMap { - fn from(props: &Parameters) -> Self { - HashMap::from(&props.0) +impl<'a> From<&'a Selector<'a>> for (&'a KeyExpr<'a>, &'a Parameters<'a>) { + fn from(selector: &'a Selector<'a>) -> Self { + (selector.key_expr.as_ref(), selector.parameters.as_ref()) } } -impl From> for HashMap { - fn from(props: Parameters) -> Self { - HashMap::from(props.0) - } +#[zenoh_macros::unstable] +pub trait PredefinedParameters { + const TIME_RANGE_KEY: &'static str = "_time"; + /// Sets the time range targeted by the selector parameters. + fn set_time_range>>(&mut self, time_range: T); + /// Extracts the standardized `_time` argument from the selector parameters. + fn time_range(&self) -> ZResult>; } -impl Parameters<'_> { - /// Create an owned version of these parameters with `'static` lifetime. - pub fn into_owned(self) -> Parameters<'static> { - Parameters(self.0.into_owned()) - } - - #[zenoh_macros::unstable] - /// Sets the time range targeted by the selector. - pub fn set_time_range>>(&mut self, time_range: T) { +#[zenoh_macros::unstable] +impl PredefinedParameters for Parameters<'_> { + /// Sets the time range targeted by the selector parameters. + fn set_time_range>>(&mut self, time_range: T) { let mut time_range: Option = time_range.into(); match time_range.take() { - Some(tr) => self.0.insert(TIME_RANGE_KEY, format!("{}", tr)), - None => self.0.remove(TIME_RANGE_KEY), + Some(tr) => self.insert(TIME_RANGE_KEY, format!("{}", tr)), + None => self.remove(TIME_RANGE_KEY), }; } - #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
- pub fn time_range(&self) -> ZResult> { - match self.0.get(TIME_RANGE_KEY) { + fn time_range(&self) -> ZResult> { + match self.get(TIME_RANGE_KEY) { Some(tr) => Ok(Some(tr.parse()?)), None => Ok(None), } @@ -243,7 +168,7 @@ impl TryFrom for Selector<'_> { Some(qmark_position) => { let parameters = s[qmark_position + 1..].to_owned(); s.truncate(qmark_position); - Ok(Selector::new(KeyExpr::try_from(s)?, parameters)) + Ok(Selector::owned(KeyExpr::try_from(s)?, parameters)) } None => Ok(KeyExpr::try_from(s)?.into()), } @@ -256,7 +181,7 @@ impl<'a> TryFrom<&'a str> for Selector<'a> { match s.find('?') { Some(qmark_position) => { let params = &s[qmark_position + 1..]; - Ok(Selector::new( + Ok(Selector::owned( KeyExpr::try_from(&s[..qmark_position])?, params, )) @@ -281,18 +206,18 @@ impl<'a> TryFrom<&'a String> for Selector<'a> { impl<'a> From<&'a Query> for Selector<'a> { fn from(q: &'a Query) -> Self { - Selector { - key_expr: q.inner.key_expr.clone(), - parameters: q.inner.parameters.clone(), + Self { + key_expr: Cow::Borrowed(&q.inner.key_expr), + parameters: Cow::Borrowed(&q.inner.parameters), } } } -impl<'a> From<&KeyExpr<'a>> for Selector<'a> { - fn from(key_selector: &KeyExpr<'a>) -> Self { +impl<'a> From<&'a KeyExpr<'a>> for Selector<'a> { + fn from(key_selector: &'a KeyExpr<'a>) -> Self { Self { - key_expr: key_selector.clone(), - parameters: "".into(), + key_expr: Cow::Borrowed(key_selector), + parameters: Cow::Owned("".into()), } } } @@ -300,8 +225,8 @@ impl<'a> From<&KeyExpr<'a>> for Selector<'a> { impl<'a> From<&'a keyexpr> for Selector<'a> { fn from(key_selector: &'a keyexpr) -> Self { Self { - key_expr: key_selector.into(), - parameters: "".into(), + key_expr: Cow::Owned(key_selector.into()), + parameters: Cow::Owned("".into()), } } } @@ -309,8 +234,8 @@ impl<'a> From<&'a keyexpr> for Selector<'a> { impl<'a> From<&'a OwnedKeyExpr> for Selector<'a> { fn from(key_selector: &'a OwnedKeyExpr) -> Self { Self { - key_expr: key_selector.into(), - parameters: 
"".into(), + key_expr: Cow::Owned(key_selector.into()), + parameters: Cow::Owned("".into()), } } } @@ -318,8 +243,8 @@ impl<'a> From<&'a OwnedKeyExpr> for Selector<'a> { impl From for Selector<'static> { fn from(key_selector: OwnedKeyExpr) -> Self { Self { - key_expr: key_selector.into(), - parameters: "".into(), + key_expr: Cow::Owned(key_selector.into()), + parameters: Cow::Owned("".into()), } } } @@ -327,8 +252,8 @@ impl From for Selector<'static> { impl<'a> From> for Selector<'a> { fn from(key_selector: KeyExpr<'a>) -> Self { Self { - key_expr: key_selector, - parameters: "".into(), + key_expr: Cow::Owned(key_selector), + parameters: Cow::Owned("".into()), } } } @@ -336,63 +261,64 @@ impl<'a> From> for Selector<'a> { #[test] fn selector_accessors() { use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + use std::collections::HashMap; - for selector in [ + for s in [ "hello/there?_timetrick", "hello/there?_timetrick;_time", "hello/there?_timetrick;_time;_filter", "hello/there?_timetrick;_time=[..]", "hello/there?_timetrick;_time=[..];_filter", ] { - let mut selector = Selector::try_from(selector).unwrap(); - println!("Parameters start: {}", selector.parameters()); - for i in selector.parameters().iter() { + let Selector { + key_expr, + parameters, + } = s.try_into().unwrap(); + assert_eq!(key_expr.as_str(), "hello/there"); + let mut parameters = parameters.into_owned(); + + println!("Parameters start: {}", parameters); + for i in parameters.iter() { println!("\t{:?}", i); } - assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); + assert_eq!(parameters.get("_timetrick").unwrap(), ""); let time_range = "[now(-2s)..now(2s)]"; zcondfeat!( "unstable", { let time_range = time_range.parse().unwrap(); - selector.parameters_mut().set_time_range(time_range); - assert_eq!( - selector.parameters().time_range().unwrap().unwrap(), - time_range - ); + parameters.set_time_range(time_range); + assert_eq!(parameters.time_range().unwrap().unwrap(), 
time_range); }, { - selector.parameters_mut().insert(TIME_RANGE_KEY, time_range); + parameters.insert(TIME_RANGE_KEY, time_range); } ); - assert_eq!( - selector.parameters().get(TIME_RANGE_KEY).unwrap(), - time_range - ); + assert_eq!(parameters.get(TIME_RANGE_KEY).unwrap(), time_range); - let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); + let hm: HashMap<&str, &str> = HashMap::from(¶meters); assert!(hm.contains_key(TIME_RANGE_KEY)); - selector.parameters_mut().insert("_filter", ""); - assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + parameters.insert("_filter", ""); + assert_eq!(parameters.get("_filter").unwrap(), ""); - let hm: HashMap = HashMap::from(selector.parameters()); + let hm: HashMap = HashMap::from(¶meters); assert!(hm.contains_key(TIME_RANGE_KEY)); - selector.parameters_mut().extend_from_iter(hm.iter()); - assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + parameters.extend_from_iter(hm.iter()); + assert_eq!(parameters.get("_filter").unwrap(), ""); - selector.parameters_mut().insert(ANYKE, ""); + parameters.insert(ANYKE, ""); - println!("Parameters end: {}", selector.parameters()); - for i in selector.parameters().iter() { + println!("Parameters end: {}", parameters); + for i in parameters.iter() { println!("\t{:?}", i); } assert_eq!( - HashMap::::from(selector.parameters()), + HashMap::::from(¶meters), HashMap::::from(Parameters::from( "_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" )) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 187ec27be7..ee30e808a7 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -35,7 +35,8 @@ use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Parameters, Reliability, WireExpr, + EMPTY_EXPR_ID, }, network::{ self, 
@@ -1656,7 +1657,8 @@ impl Session { #[allow(clippy::too_many_arguments)] pub(crate) fn query( &self, - selector: &Selector<'_>, + key_expr: &KeyExpr<'_>, + parameters: &Parameters<'_>, scope: &Option>, target: QueryTarget, consolidation: QueryConsolidation, @@ -1668,11 +1670,16 @@ impl Session { #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { - tracing::trace!("get({}, {:?}, {:?})", selector, target, consolidation); + tracing::trace!( + "get({}, {:?}, {:?})", + Selector::borrowed(key_expr, parameters), + target, + consolidation + ); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { ConsolidationMode::Auto => { - if selector.parameters().contains_key(TIME_RANGE_KEY) { + if parameters.contains_key(TIME_RANGE_KEY) { ConsolidationMode::None } else { ConsolidationMode::Latest @@ -1714,21 +1721,19 @@ impl Session { } }); - let selector = match scope { - Some(scope) => Selector { - key_expr: scope / &*selector.key_expr, - parameters: selector.parameters.clone(), - }, - None => selector.clone(), + let key_expr = match scope { + Some(scope) => scope / key_expr, + None => key_expr.clone().into_owned(), }; tracing::trace!("Register query {} (nb_final = {})", qid, nb_final); - let wexpr = selector.key_expr.to_wire(self).to_owned(); + let wexpr = key_expr.to_wire(self).to_owned(); state.queries.insert( qid, QueryState { nb_final, - selector: selector.clone().into_owned(), + key_expr, + parameters: parameters.clone().into_owned(), scope: scope.clone().map(|e| e.into_owned()), reception_mode: consolidation, replies: (consolidation != ConsolidationMode::None).then(HashMap::new), @@ -1759,7 +1764,7 @@ impl Session { ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, - parameters: selector.parameters().to_string(), + parameters: parameters.to_string(), #[cfg(feature = "unstable")] ext_sinfo: source.into(), #[cfg(not(feature = "unstable"))] @@ 
-1779,7 +1784,7 @@ impl Session { self.handle_query( true, &wexpr, - selector.parameters().as_str(), + parameters.as_str(), qid, target, consolidation, @@ -2247,18 +2252,15 @@ impl Primitives for Session { Some(query) => { let c = zcondfeat!( "unstable", - !query - .selector - .parameters() - .contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM), + !query.parameters.contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM), true ); - if c && !query.selector.key_expr.intersects(&key_expr) { + if c && !query.key_expr.intersects(&key_expr) { tracing::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", key_expr, msg.ext_respid, - query.selector + query.selector() ); return; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 0c47070609..3bf3e9f9f7 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -247,13 +247,15 @@ pub mod bytes { /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - pub use zenoh_protocol::core::Properties; + #[zenoh_macros::unstable] + pub use crate::api::selector::PredefinedParameters; + pub use zenoh_protocol::core::Parameters; #[zenoh_macros::unstable] pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + pub use crate::api::selector::Selector; #[zenoh_macros::unstable] pub use crate::api::selector::TIME_RANGE_KEY; - pub use crate::api::selector::{Parameters, Selector}; } /// Subscribing primitives diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 6b8ac52240..c2505b1f6d 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -787,7 +787,7 @@ fn plugins_data(context: &AdminContext, query: Query) { fn plugins_status(context: &AdminContext, query: Query) { use crate::bytes::{Serialize, ZSerde}; - let selector = query.selector(); + let key_expr = query.key_expr(); let guard = context.runtime.plugins_manager(); let mut root_key = format!( 
"@/{}/{}/status/plugins/", @@ -820,7 +820,7 @@ fn plugins_status(context: &AdminContext, query: Query) { return; } match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - plugin.instance().adminspace_getter(&selector, plugin_key) + plugin.instance().adminspace_getter(&key_expr, plugin_key) })) { Ok(Ok(responses)) => { for response in responses { From a65686af4fd5341c7b9ac895b4ae76058db04b58 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 9 Jun 2024 19:43:40 +0200 Subject: [PATCH 389/598] from (Keyexp, Parameters) added --- .../src/replica/storage.rs | 2 +- zenoh/src/api/selector.rs | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index ed7f533147..1cdced1c0b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -647,7 +647,7 @@ impl StorageService { // with `_time=[..]` to get historical data (in case of time-series) let replies = match self .session - .get(Selector::owned(&self.key_expr, "_time=[..]")) + .get((&self.key_expr, "_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) .await diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 6f6eeaf8ff..6e2e0e7890 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -93,6 +93,16 @@ impl<'a> Selector<'a> { } } +impl<'a, K, P> From<(K, P)> for Selector<'a> +where + K: Into>, + P: Into>, +{ + fn from((key_expr, parameters): (K, P)) -> Self { + Self::owned(key_expr, parameters) + } +} + impl<'a> From> for (KeyExpr<'a>, Parameters<'a>) { fn from(selector: Selector<'a>) -> Self { ( From 4989f4f8c34797e045d5b1ca2223da2a3e9b42ea Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 9 Jun 2024 20:36:49 +0200 Subject: [PATCH 390/598] attachment made stable --- zenoh/src/api/admin.rs | 13 ++----- 
zenoh/src/api/builders/publisher.rs | 6 ---- zenoh/src/api/builders/sample.rs | 4 --- zenoh/src/api/liveliness.rs | 1 - zenoh/src/api/queryable.rs | 1 - zenoh/src/api/session.rs | 55 +++++------------------------ zenoh/src/net/runtime/adminspace.rs | 1 - 7 files changed, 11 insertions(+), 70 deletions(-) diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 6e7605e95b..4c4d2a869e 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -167,7 +167,6 @@ impl TransportMulticastEventHandler for Handler { &expr, Some(info), serde_json::to_vec(&peer).unwrap().into(), - #[cfg(feature = "unstable")] None, ); Ok(Arc::new(PeerHandler { @@ -216,7 +215,6 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), serde_json::to_vec(&link).unwrap().into(), - #[cfg(feature = "unstable")] None, ); } @@ -236,7 +234,6 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), vec![0u8; 0].into(), - #[cfg(feature = "unstable")] None, ); } @@ -248,14 +245,8 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session.handle_data( - true, - &self.expr, - Some(info), - vec![0u8; 0].into(), - #[cfg(feature = "unstable")] - None, - ); + self.session + .handle_data(true, &self.expr, Some(info), vec![0u8; 0].into(), None); } fn as_any(&self) -> &dyn std::any::Any { diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 950cd946b3..48465ab807 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -78,7 +78,6 @@ pub struct PublicationBuilder { pub(crate) timestamp: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -157,7 +156,6 @@ impl SampleBuilderTrait for PublicationBuilder { ..self } } - #[cfg(feature = "unstable")] fn attachment>(self, 
attachment: TA) -> Self { let attachment: OptionZBytes = attachment.into(); Self { @@ -191,7 +189,6 @@ impl Wait for PublicationBuilder, PublicationBuilderPut self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } @@ -208,7 +205,6 @@ impl Wait for PublicationBuilder, PublicationBuilderDel self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } @@ -393,7 +389,6 @@ impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } @@ -408,7 +403,6 @@ impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index e80253a074..8b3eb1d19c 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -95,7 +95,6 @@ impl SampleBuilder { qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, }, _t: PhantomData::, @@ -118,7 +117,6 @@ impl SampleBuilder { qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, }, _t: PhantomData::, @@ -162,7 +160,6 @@ impl TimestampBuilderTrait for SampleBuilder { } } -#[cfg(feature = "unstable")] impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { @@ -175,7 +172,6 @@ impl SampleBuilderTrait for SampleBuilder { } } - #[zenoh_macros::unstable] fn attachment>(self, attachment: U) -> Self { let attachment: OptionZBytes = attachment.into(); Self { diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5011b99a7e..af1925032d 
100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -750,7 +750,6 @@ where Locality::default(), self.timeout, None, - #[cfg(feature = "unstable")] None, SourceInfo::empty(), callback, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index bb41a37c2f..588df0e331 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -325,7 +325,6 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } -#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { fn attachment>(self, attachment: U) -> Self { let attachment: OptionZBytes = attachment.into(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 187ec27be7..c2f4bd5591 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1634,21 +1634,13 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { - let sample = info.clone().into_sample( - key_expr, - payload.clone(), - #[cfg(feature = "unstable")] - attachment.clone(), - ); + let sample = info + .clone() + .into_sample(key_expr, payload.clone(), attachment.clone()); cb(sample); } if let Some((cb, key_expr)) = last { - let sample = info.into_sample( - key_expr, - payload, - #[cfg(feature = "unstable")] - attachment.clone(), - ); + let sample = info.into_sample(key_expr, payload, attachment.clone()); cb(sample); } } @@ -1664,7 +1656,7 @@ impl Session { destination: Locality, timeout: Duration, value: Option, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { @@ -1740,14 +1732,7 @@ impl Session { drop(state); if destination != Locality::SessionLocal { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = 
Some(attachment.into()); - } - } + let ext_attachment = attachment.clone().map(Into::into); primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), @@ -1789,7 +1774,6 @@ impl Session { encoding: v.encoding.clone().into(), payload: v.payload.clone().into(), }), - #[cfg(feature = "unstable")] attachment, ); } @@ -1806,7 +1790,7 @@ impl Session { _target: TargetType, _consolidation: Consolidation, body: Option, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) { let (primitives, key_expr, queryables) = { let state = zread!(self.state); @@ -1869,7 +1853,6 @@ impl Session { payload: b.payload.clone().into(), encoding: b.encoding.clone().into(), }), - #[cfg(feature = "unstable")] attachment: attachment.clone(), }); } @@ -2079,14 +2062,7 @@ impl Primitives for Session { .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) { drop(state); - self.handle_data( - false, - &m.wire_expr, - None, - ZBuf::default(), - #[cfg(feature = "unstable")] - None, - ); + self.handle_data(false, &m.wire_expr, None, ZBuf::default(), None); } } Err(err) => { @@ -2120,7 +2096,6 @@ impl Primitives for Session { &expr.to_wire(self), Some(data_info), ZBuf::default(), - #[cfg(feature = "unstable")] None, ); } @@ -2164,7 +2139,6 @@ impl Primitives for Session { &msg.wire_expr, Some(info), m.payload, - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ) } @@ -2182,7 +2156,6 @@ impl Primitives for Session { &msg.wire_expr, Some(info), ZBuf::empty(), - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ) } @@ -2200,7 +2173,6 @@ impl Primitives for Session { msg.ext_target, m.consolidation, m.ext_body, - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ), } @@ -2292,13 +2264,11 @@ impl Primitives for Session { struct Ret { payload: ZBuf, info: DataInfo, - #[cfg(feature = "unstable")] attachment: Option, } let Ret { payload, info, - #[cfg(feature = "unstable")] attachment, } = match m.payload { ReplyBody::Put(Put { @@ -2318,7 
+2288,6 @@ impl Primitives for Session { source_id: ext_sinfo.as_ref().map(|i| i.id), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, - #[cfg(feature = "unstable")] attachment: _attachment.map(Into::into), }, ReplyBody::Del(Del { @@ -2336,16 +2305,10 @@ impl Primitives for Session { source_id: ext_sinfo.as_ref().map(|i| i.id), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, - #[cfg(feature = "unstable")] attachment: _attachment.map(Into::into), }, }; - let sample = info.into_sample( - key_expr.into_owned(), - payload, - #[cfg(feature = "unstable")] - attachment, - ); + let sample = info.into_sample(key_expr.into_owned(), payload, attachment); let new_reply = Reply { result: Ok(sample), replier_id: zenoh_protocol::core::ZenohId::rand(), // TODO diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 6b8ac52240..da2ab9b628 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -473,7 +473,6 @@ impl Primitives for AdminSpace { }), eid: self.queryable_id, value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), - #[cfg(feature = "unstable")] attachment: query.ext_attachment.map(Into::into), }; From 75fbda2adb94db0247887e18d9012ed41cb0c1f3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 9 Jun 2024 20:42:14 +0200 Subject: [PATCH 391/598] one more unstable remove --- zenoh/src/api/builders/sample.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 8b3eb1d19c..d5cd95851c 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -49,7 +49,6 @@ pub trait SampleBuilderTrait { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format - #[zenoh_macros::unstable] fn attachment>(self, attachment: T) -> Self; } From 6b541fcf67fd5b7e9b7f697035606a72722b3e0b Mon Sep 17 00:00:00 2001 From: Michael Ilyin 
Date: Sun, 9 Jun 2024 20:49:26 +0200 Subject: [PATCH 392/598] use moved out of unstable --- zenoh/src/api/builders/publisher.rs | 1 - zenoh/src/api/builders/sample.rs | 3 ++- zenoh/src/api/queryable.rs | 7 +++---- zenoh/src/api/session.rs | 3 ++- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 48465ab807..6c3e8b5ade 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -16,7 +16,6 @@ use std::future::{IntoFuture, Ready}; use zenoh_core::{Resolvable, Result as ZResult, Wait}; use zenoh_protocol::{core::CongestionControl, network::Mapping}; -#[cfg(feature = "unstable")] use crate::api::bytes::OptionZBytes; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index d5cd95851c..c74e429318 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -17,6 +17,7 @@ use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; +use crate::api::bytes::OptionZBytes; use crate::api::{ bytes::ZBytes, encoding::Encoding, @@ -26,7 +27,7 @@ use crate::api::{ value::Value, }; #[cfg(feature = "unstable")] -use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; +use crate::sample::SourceInfo; pub trait QoSBuilderTrait { /// Change the `congestion_control` to apply when routing the data. 
diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 588df0e331..c881ed6ffa 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -18,6 +18,8 @@ use std::{ sync::Arc, }; +use super::builders::sample::SampleBuilderTrait; +use super::bytes::OptionZBytes; use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ @@ -28,10 +30,7 @@ use zenoh_protocol::{ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { - super::{ - builders::sample::SampleBuilderTrait, bytes::OptionZBytes, query::ReplyKeyExpr, - sample::SourceInfo, - }, + super::{query::ReplyKeyExpr, sample::SourceInfo}, zenoh_protocol::core::EntityGlobalId, }; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index c2f4bd5591..3aeb10bbeb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -76,7 +76,7 @@ use super::{ }, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, - selector::{Selector, TIME_RANGE_KEY}, + selector::Selector, subscriber::{SubscriberBuilder, SubscriberState}, value::Value, Id, @@ -88,6 +88,7 @@ use super::{ publisher::{MatchingListenerState, MatchingStatus}, query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, + selector::TIME_RANGE_KEY, }; use crate::net::{ primitives::Primitives, From 9bf86e881ae70498d11a664551c33f92df10c287 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 9 Jun 2024 20:51:16 +0200 Subject: [PATCH 393/598] cargo fmt --- zenoh/src/api/builders/publisher.rs | 3 +-- zenoh/src/api/builders/sample.rs | 3 +-- zenoh/src/api/queryable.rs | 9 +++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 6c3e8b5ade..9ebf25cba6 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -16,14 +16,13 @@ use std::future::{IntoFuture, Ready}; use 
zenoh_core::{Resolvable, Result as ZResult, Wait}; use zenoh_protocol::{core::CongestionControl, network::Mapping}; -use crate::api::bytes::OptionZBytes; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::{ builders::sample::{ QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }, - bytes::ZBytes, + bytes::{OptionZBytes, ZBytes}, encoding::Encoding, key_expr::KeyExpr, publisher::{Priority, Publisher}, diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index c74e429318..5537cf4326 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -17,9 +17,8 @@ use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; -use crate::api::bytes::OptionZBytes; use crate::api::{ - bytes::ZBytes, + bytes::{OptionZBytes, ZBytes}, encoding::Encoding, key_expr::KeyExpr, publisher::Priority, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index c881ed6ffa..f6cc939e5b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -18,8 +18,6 @@ use std::{ sync::Arc, }; -use super::builders::sample::SampleBuilderTrait; -use super::bytes::OptionZBytes; use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ @@ -35,8 +33,11 @@ use { }; use super::{ - builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, - bytes::ZBytes, + builders::sample::{ + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + ValueBuilderTrait, + }, + bytes::{OptionZBytes, ZBytes}, encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, From cf1c65392f11625107dde4b0ca6f9d4ad8b83950 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 10:33:11 +0200 Subject: [PATCH 394/598] feat: remove `alive` flag in session/undeclarable objects This flag was just used to prevent double close/undeclaration, i.e. 
object being drop after `close`/`undeclare`. Using `ManuallyDrop` instead in `close`/`undeclare` solve this issue, and save some space in structs (often one word counting the padding, which is not negligible). --- zenoh/src/api/liveliness.rs | 17 +++++++---------- zenoh/src/api/publisher.rs | 22 ++++++++++------------ zenoh/src/api/queryable.rs | 16 +++++++--------- zenoh/src/api/session.rs | 22 ++++++++++------------ zenoh/src/api/subscriber.rs | 16 +++++++--------- 5 files changed, 41 insertions(+), 52 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5011b99a7e..5d33898f6f 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,6 +15,7 @@ use std::{ convert::TryInto, future::{IntoFuture, Ready}, + mem::ManuallyDrop, sync::Arc, time::Duration, }; @@ -235,7 +236,6 @@ impl Wait for LivelinessTokenBuilder<'_, '_> { .map(|tok_state| LivelinessToken { session, state: tok_state, - alive: true, }) } } @@ -291,7 +291,6 @@ pub(crate) struct LivelinessTokenState { pub struct LivelinessToken<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, - pub(crate) alive: bool, } /// A [`Resolvable`] returned when undeclaring a [`LivelinessToken`](LivelinessToken). 
@@ -315,7 +314,7 @@ pub struct LivelinessToken<'a> { #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[zenoh_macros::unstable] pub struct LivelinessTokenUndeclaration<'a> { - token: LivelinessToken<'a>, + token: ManuallyDrop>, } #[zenoh_macros::unstable] @@ -325,8 +324,7 @@ impl Resolvable for LivelinessTokenUndeclaration<'_> { #[zenoh_macros::unstable] impl Wait for LivelinessTokenUndeclaration<'_> { - fn wait(mut self) -> ::To { - self.token.alive = false; + fn wait(self) -> ::To { self.token.session.undeclare_liveliness(self.token.state.id) } } @@ -374,16 +372,16 @@ impl<'a> LivelinessToken<'a> { #[zenoh_macros::unstable] impl<'a> Undeclarable<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> { fn undeclare_inner(self, _: ()) -> LivelinessTokenUndeclaration<'a> { - LivelinessTokenUndeclaration { token: self } + LivelinessTokenUndeclaration { + token: ManuallyDrop::new(self), + } } } #[zenoh_macros::unstable] impl Drop for LivelinessToken<'_> { fn drop(&mut self) { - if self.alive { - let _ = self.session.undeclare_liveliness(self.state.id); - } + let _ = self.session.undeclare_liveliness(self.state.id); } } @@ -553,7 +551,6 @@ where subscriber: SubscriberInner { session, state: sub_state, - alive: true, }, handler, }) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index c4cff83848..9b3306da4d 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -16,6 +16,7 @@ use std::{ convert::TryFrom, fmt, future::{IntoFuture, Ready}, + mem::ManuallyDrop, pin::Pin, task::{Context, Poll}, }; @@ -894,7 +895,6 @@ where listener: MatchingListenerInner { publisher: self.publisher, state: listener_state, - alive: true, }, receiver, }) @@ -939,7 +939,6 @@ impl std::fmt::Debug for MatchingListenerState { pub(crate) struct MatchingListenerInner<'a> { pub(crate) publisher: PublisherRef<'a>, pub(crate) state: std::sync::Arc, - pub(crate) alive: bool, 
} #[zenoh_macros::unstable] @@ -953,7 +952,9 @@ impl<'a> MatchingListenerInner<'a> { #[zenoh_macros::unstable] impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { - MatchingListenerUndeclaration { subscriber: self } + MatchingListenerUndeclaration { + subscriber: ManuallyDrop::new(self), + } } } @@ -1033,7 +1034,7 @@ impl std::ops::DerefMut for MatchingListener<'_, Receiver> { #[zenoh_macros::unstable] pub struct MatchingListenerUndeclaration<'a> { - subscriber: MatchingListenerInner<'a>, + subscriber: ManuallyDrop>, } #[zenoh_macros::unstable] @@ -1043,8 +1044,7 @@ impl Resolvable for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Wait for MatchingListenerUndeclaration<'_> { - fn wait(mut self) -> ::To { - self.subscriber.alive = false; + fn wait(self) -> ::To { self.subscriber .publisher .session @@ -1065,12 +1065,10 @@ impl IntoFuture for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Drop for MatchingListenerInner<'_> { fn drop(&mut self) { - if self.alive { - let _ = self - .publisher - .session - .undeclare_matches_listener_inner(self.state.id); - } + let _ = self + .publisher + .session + .undeclare_matches_listener_inner(self.state.id); } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index bb41a37c2f..e3b9f30ba8 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -14,6 +14,7 @@ use std::{ fmt, future::{IntoFuture, Ready}, + mem::ManuallyDrop, ops::{Deref, DerefMut}, sync::Arc, }; @@ -611,12 +612,13 @@ impl fmt::Debug for QueryableState { pub(crate) struct CallbackQueryable<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, - pub(crate) alive: bool, } impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> { fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> { - QueryableUndeclaration { queryable: self } + 
QueryableUndeclaration { + queryable: ManuallyDrop::new(self), + } } } @@ -635,7 +637,7 @@ impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct QueryableUndeclaration<'a> { - queryable: CallbackQueryable<'a>, + queryable: ManuallyDrop>, } impl Resolvable for QueryableUndeclaration<'_> { @@ -643,8 +645,7 @@ impl Resolvable for QueryableUndeclaration<'_> { } impl Wait for QueryableUndeclaration<'_> { - fn wait(mut self) -> ::To { - self.queryable.alive = false; + fn wait(self) -> ::To { self.queryable .session .close_queryable(self.queryable.state.id) @@ -662,9 +663,7 @@ impl<'a> IntoFuture for QueryableUndeclaration<'a> { impl Drop for CallbackQueryable<'_> { fn drop(&mut self) { - if self.alive { - let _ = self.session.close_queryable(self.state.id); - } + let _ = self.session.close_queryable(self.state.id); } } @@ -944,7 +943,6 @@ where queryable: CallbackQueryable { session, state: qable_state, - alive: true, }, handler: receiver, }) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 187ec27be7..eb51817dfb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -16,6 +16,7 @@ use std::{ convert::{TryFrom, TryInto}, fmt, future::{IntoFuture, Ready}, + mem::ManuallyDrop, ops::Deref, sync::{ atomic::{AtomicU16, Ordering}, @@ -404,7 +405,6 @@ pub struct Session { pub(crate) runtime: Runtime, pub(crate) state: Arc>, pub(crate) id: u16, - pub(crate) alive: bool, owns_runtime: bool, task_controller: TaskController, } @@ -426,7 +426,6 @@ impl Session { runtime: runtime.clone(), state: state.clone(), id: SESSION_ID_COUNTER.fetch_add(1, Ordering::SeqCst), - alive: true, owns_runtime: false, task_controller: TaskController::default(), }; @@ -530,20 +529,22 @@ impl Session { /// session.close().await.unwrap(); /// # } /// ``` - pub fn close(mut self) -> impl Resolve> 
{ + pub fn close(self) -> impl Resolve> { + let session = ManuallyDrop::new(self); ResolveFuture::new(async move { trace!("close()"); - self.task_controller.terminate_all(Duration::from_secs(10)); - if self.owns_runtime { - self.runtime.close().await?; + session + .task_controller + .terminate_all(Duration::from_secs(10)); + if session.owns_runtime { + session.runtime.close().await?; } - let mut state = zwrite!(self.state); + let mut state = zwrite!(session.state); // clean up to break cyclic references from self.state to itself let primitives = state.primitives.take(); state.queryables.clear(); drop(state); primitives.as_ref().unwrap().send_close(); - self.alive = false; Ok(()) }) } @@ -830,7 +831,6 @@ impl Session { runtime: self.runtime.clone(), state: self.state.clone(), id: self.id, - alive: false, owns_runtime: self.owns_runtime, task_controller: self.task_controller.clone(), } @@ -2472,9 +2472,7 @@ impl Primitives for Session { impl Drop for Session { fn drop(&mut self) { - if self.alive { - let _ = self.clone().close().wait(); - } + let _ = self.clone().close().wait(); } } diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index e298e3c9c9..83acd4c219 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -15,6 +15,7 @@ use std::{ fmt, future::{IntoFuture, Ready}, + mem::ManuallyDrop, ops::{Deref, DerefMut}, sync::Arc, }; @@ -78,7 +79,6 @@ impl fmt::Debug for SubscriberState { pub(crate) struct SubscriberInner<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, - pub(crate) alive: bool, } impl<'a> SubscriberInner<'a> { @@ -111,7 +111,9 @@ impl<'a> SubscriberInner<'a> { impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> { - SubscriberUndeclaration { subscriber: self } + SubscriberUndeclaration { + subscriber: ManuallyDrop::new(self), + } } } @@ -133,7 +135,7 @@ impl<'a> Undeclarable<(), 
SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct SubscriberUndeclaration<'a> { - subscriber: SubscriberInner<'a>, + subscriber: ManuallyDrop>, } impl Resolvable for SubscriberUndeclaration<'_> { @@ -141,8 +143,7 @@ impl Resolvable for SubscriberUndeclaration<'_> { } impl Wait for SubscriberUndeclaration<'_> { - fn wait(mut self) -> ::To { - self.subscriber.alive = false; + fn wait(self) -> ::To { self.subscriber .session .undeclare_subscriber_inner(self.subscriber.state.id) @@ -160,9 +161,7 @@ impl IntoFuture for SubscriberUndeclaration<'_> { impl Drop for SubscriberInner<'_> { fn drop(&mut self) { - if self.alive { - let _ = self.session.undeclare_subscriber_inner(self.state.id); - } + let _ = self.session.undeclare_subscriber_inner(self.state.id); } } @@ -387,7 +386,6 @@ where subscriber: SubscriberInner { session, state: sub_state, - alive: true, }, handler: receiver, }) From b0ba472b5c94923cc4dfbc03db4675b4cc00107c Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 11:30:23 +0200 Subject: [PATCH 395/598] fix: add missing `Publisher` refactoring --- zenoh/src/api/publisher.rs | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 9b3306da4d..cfce42d6ed 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -443,7 +443,9 @@ impl PublisherDeclarations for std::sync::Arc> { impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { fn undeclare_inner(self, _: ()) -> PublisherUndeclaration<'a> { - PublisherUndeclaration { publisher: self } + PublisherUndeclaration { + publisher: ManuallyDrop::new(self), + } } } @@ -462,7 +464,7 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve 
them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct PublisherUndeclaration<'a> { - publisher: Publisher<'a>, + publisher: ManuallyDrop>, } impl Resolvable for PublisherUndeclaration<'_> { @@ -470,13 +472,10 @@ impl Resolvable for PublisherUndeclaration<'_> { } impl Wait for PublisherUndeclaration<'_> { - fn wait(mut self) -> ::To { - let Publisher { - session, id: eid, .. - } = &self.publisher; - session.undeclare_publisher_inner(*eid)?; - self.publisher.key_expr = unsafe { keyexpr::from_str_unchecked("") }.into(); - Ok(()) + fn wait(self) -> ::To { + self.publisher + .session + .undeclare_publisher_inner(self.publisher.id) } } @@ -491,9 +490,7 @@ impl IntoFuture for PublisherUndeclaration<'_> { impl Drop for Publisher<'_> { fn drop(&mut self) { - if !self.key_expr.is_empty() { - let _ = self.session.undeclare_publisher_inner(self.id); - } + let _ = self.session.undeclare_publisher_inner(self.id); } } From 00264bfe3bcd1dd9163d69382447409055dcfed3 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 12:18:00 +0200 Subject: [PATCH 396/598] feat: put unrecognized encoding string in schema of "zenoh/bytes" (#1102) --- zenoh/src/api/encoding.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index fcc5ae119a..d23429ee0e 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -638,10 +638,13 @@ impl From<&str> for Encoding { } // Everything before `;` may be mapped to a known id - let (id, schema) = t.split_once(Encoding::SCHEMA_SEP).unwrap_or((t, "")); + let (id, mut schema) = t.split_once(Encoding::SCHEMA_SEP).unwrap_or((t, "")); if let Some(id) = Encoding::STR_TO_ID.get(id).copied() { inner.id = id; - }; + // if id is not recognized, e.g. 
`t == "my_encoding"`, put it in the schema + } else { + schema = t; + } if !schema.is_empty() { inner.schema = Some(ZSlice::from(schema.to_string().into_bytes())); } From 2aa42e4befea8085f9cd3406a52a7dbe3a261e86 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 10 Jun 2024 12:20:42 +0200 Subject: [PATCH 397/598] Attachment made stable (#1099) * attachment made stable * one more unstable remove * use moved out of unstable * cargo fmt --- zenoh/src/api/admin.rs | 13 +------ zenoh/src/api/builders/publisher.rs | 10 +---- zenoh/src/api/builders/sample.rs | 9 +---- zenoh/src/api/liveliness.rs | 1 - zenoh/src/api/queryable.rs | 13 +++---- zenoh/src/api/session.rs | 58 ++++++----------------------- zenoh/src/net/runtime/adminspace.rs | 1 - 7 files changed, 22 insertions(+), 83 deletions(-) diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 6e7605e95b..4c4d2a869e 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -167,7 +167,6 @@ impl TransportMulticastEventHandler for Handler { &expr, Some(info), serde_json::to_vec(&peer).unwrap().into(), - #[cfg(feature = "unstable")] None, ); Ok(Arc::new(PeerHandler { @@ -216,7 +215,6 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), serde_json::to_vec(&link).unwrap().into(), - #[cfg(feature = "unstable")] None, ); } @@ -236,7 +234,6 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), vec![0u8; 0].into(), - #[cfg(feature = "unstable")] None, ); } @@ -248,14 +245,8 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session.handle_data( - true, - &self.expr, - Some(info), - vec![0u8; 0].into(), - #[cfg(feature = "unstable")] - None, - ); + self.session + .handle_data(true, &self.expr, Some(info), vec![0u8; 0].into(), None); } fn as_any(&self) -> &dyn std::any::Any { diff --git a/zenoh/src/api/builders/publisher.rs 
b/zenoh/src/api/builders/publisher.rs index 950cd946b3..9ebf25cba6 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -16,15 +16,13 @@ use std::future::{IntoFuture, Ready}; use zenoh_core::{Resolvable, Result as ZResult, Wait}; use zenoh_protocol::{core::CongestionControl, network::Mapping}; -#[cfg(feature = "unstable")] -use crate::api::bytes::OptionZBytes; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::{ builders::sample::{ QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }, - bytes::ZBytes, + bytes::{OptionZBytes, ZBytes}, encoding::Encoding, key_expr::KeyExpr, publisher::{Priority, Publisher}, @@ -78,7 +76,6 @@ pub struct PublicationBuilder { pub(crate) timestamp: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -157,7 +154,6 @@ impl SampleBuilderTrait for PublicationBuilder { ..self } } - #[cfg(feature = "unstable")] fn attachment>(self, attachment: TA) -> Self { let attachment: OptionZBytes = attachment.into(); Self { @@ -191,7 +187,6 @@ impl Wait for PublicationBuilder, PublicationBuilderPut self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } @@ -208,7 +203,6 @@ impl Wait for PublicationBuilder, PublicationBuilderDel self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } @@ -393,7 +387,6 @@ impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } @@ -408,7 +401,6 @@ impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { self.timestamp, #[cfg(feature = "unstable")] self.source_info, - #[cfg(feature = "unstable")] self.attachment, ) } diff --git a/zenoh/src/api/builders/sample.rs 
b/zenoh/src/api/builders/sample.rs index e80253a074..5537cf4326 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -18,7 +18,7 @@ use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; use crate::api::{ - bytes::ZBytes, + bytes::{OptionZBytes, ZBytes}, encoding::Encoding, key_expr::KeyExpr, publisher::Priority, @@ -26,7 +26,7 @@ use crate::api::{ value::Value, }; #[cfg(feature = "unstable")] -use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; +use crate::sample::SourceInfo; pub trait QoSBuilderTrait { /// Change the `congestion_control` to apply when routing the data. @@ -49,7 +49,6 @@ pub trait SampleBuilderTrait { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format - #[zenoh_macros::unstable] fn attachment>(self, attachment: T) -> Self; } @@ -95,7 +94,6 @@ impl SampleBuilder { qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, }, _t: PhantomData::, @@ -118,7 +116,6 @@ impl SampleBuilder { qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] attachment: None, }, _t: PhantomData::, @@ -162,7 +159,6 @@ impl TimestampBuilderTrait for SampleBuilder { } } -#[cfg(feature = "unstable")] impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { @@ -175,7 +171,6 @@ impl SampleBuilderTrait for SampleBuilder { } } - #[zenoh_macros::unstable] fn attachment>(self, attachment: U) -> Self { let attachment: OptionZBytes = attachment.into(); Self { diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5011b99a7e..af1925032d 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -750,7 +750,6 @@ where Locality::default(), self.timeout, None, - #[cfg(feature = "unstable")] None, 
SourceInfo::empty(), callback, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index bb41a37c2f..f6cc939e5b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -28,16 +28,16 @@ use zenoh_protocol::{ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { - super::{ - builders::sample::SampleBuilderTrait, bytes::OptionZBytes, query::ReplyKeyExpr, - sample::SourceInfo, - }, + super::{query::ReplyKeyExpr, sample::SourceInfo}, zenoh_protocol::core::EntityGlobalId, }; use super::{ - builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, - bytes::ZBytes, + builders::sample::{ + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + ValueBuilderTrait, + }, + bytes::{OptionZBytes, ZBytes}, encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, @@ -325,7 +325,6 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } -#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { fn attachment>(self, attachment: U) -> Self { let attachment: OptionZBytes = attachment.into(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 187ec27be7..3aeb10bbeb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -76,7 +76,7 @@ use super::{ }, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, - selector::{Selector, TIME_RANGE_KEY}, + selector::Selector, subscriber::{SubscriberBuilder, SubscriberState}, value::Value, Id, @@ -88,6 +88,7 @@ use super::{ publisher::{MatchingListenerState, MatchingStatus}, query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, + selector::TIME_RANGE_KEY, }; use crate::net::{ primitives::Primitives, @@ -1634,21 +1635,13 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain 
{ - let sample = info.clone().into_sample( - key_expr, - payload.clone(), - #[cfg(feature = "unstable")] - attachment.clone(), - ); + let sample = info + .clone() + .into_sample(key_expr, payload.clone(), attachment.clone()); cb(sample); } if let Some((cb, key_expr)) = last { - let sample = info.into_sample( - key_expr, - payload, - #[cfg(feature = "unstable")] - attachment.clone(), - ); + let sample = info.into_sample(key_expr, payload, attachment.clone()); cb(sample); } } @@ -1664,7 +1657,7 @@ impl Session { destination: Locality, timeout: Duration, value: Option, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { @@ -1740,14 +1733,7 @@ impl Session { drop(state); if destination != Locality::SessionLocal { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); - } - } + let ext_attachment = attachment.clone().map(Into::into); primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), @@ -1789,7 +1775,6 @@ impl Session { encoding: v.encoding.clone().into(), payload: v.payload.clone().into(), }), - #[cfg(feature = "unstable")] attachment, ); } @@ -1806,7 +1791,7 @@ impl Session { _target: TargetType, _consolidation: Consolidation, body: Option, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) { let (primitives, key_expr, queryables) = { let state = zread!(self.state); @@ -1869,7 +1854,6 @@ impl Session { payload: b.payload.clone().into(), encoding: b.encoding.clone().into(), }), - #[cfg(feature = "unstable")] attachment: attachment.clone(), }); } @@ -2079,14 +2063,7 @@ impl Primitives for Session { .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) { drop(state); - self.handle_data( - false, - &m.wire_expr, - None, - ZBuf::default(), - #[cfg(feature = "unstable")] - None, - 
); + self.handle_data(false, &m.wire_expr, None, ZBuf::default(), None); } } Err(err) => { @@ -2120,7 +2097,6 @@ impl Primitives for Session { &expr.to_wire(self), Some(data_info), ZBuf::default(), - #[cfg(feature = "unstable")] None, ); } @@ -2164,7 +2140,6 @@ impl Primitives for Session { &msg.wire_expr, Some(info), m.payload, - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ) } @@ -2182,7 +2157,6 @@ impl Primitives for Session { &msg.wire_expr, Some(info), ZBuf::empty(), - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ) } @@ -2200,7 +2174,6 @@ impl Primitives for Session { msg.ext_target, m.consolidation, m.ext_body, - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ), } @@ -2292,13 +2265,11 @@ impl Primitives for Session { struct Ret { payload: ZBuf, info: DataInfo, - #[cfg(feature = "unstable")] attachment: Option, } let Ret { payload, info, - #[cfg(feature = "unstable")] attachment, } = match m.payload { ReplyBody::Put(Put { @@ -2318,7 +2289,6 @@ impl Primitives for Session { source_id: ext_sinfo.as_ref().map(|i| i.id), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, - #[cfg(feature = "unstable")] attachment: _attachment.map(Into::into), }, ReplyBody::Del(Del { @@ -2336,16 +2306,10 @@ impl Primitives for Session { source_id: ext_sinfo.as_ref().map(|i| i.id), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, - #[cfg(feature = "unstable")] attachment: _attachment.map(Into::into), }, }; - let sample = info.into_sample( - key_expr.into_owned(), - payload, - #[cfg(feature = "unstable")] - attachment, - ); + let sample = info.into_sample(key_expr.into_owned(), payload, attachment); let new_reply = Reply { result: Ok(sample), replier_id: zenoh_protocol::core::ZenohId::rand(), // TODO diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 6b8ac52240..da2ab9b628 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -473,7 +473,6 @@ 
impl Primitives for AdminSpace { }), eid: self.queryable_id, value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), - #[cfg(feature = "unstable")] attachment: query.ext_attachment.map(Into::into), }; From 2d1e64ca3fe5cf254be32ef17ccbf87f40f8caab Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Mon, 10 Jun 2024 12:43:48 +0200 Subject: [PATCH 398/598] Enable `zenoh/internal` feature in `zenohd` crate (#1105) --- zenohd/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml index caf7169673..b0320ce648 100644 --- a/zenohd/Cargo.toml +++ b/zenohd/Cargo.toml @@ -42,7 +42,7 @@ tracing = {workspace = true} tracing-subscriber = {workspace = true} tracing-loki = {workspace = true, optional = true } url = {workspace = true, optional = true } -zenoh = { workspace = true, features = ["unstable", "plugins"] } +zenoh = { workspace = true, features = ["unstable", "internal", "plugins"] } [dev-dependencies] rand = { workspace = true, features = ["default"] } From 05a56ceb9cae4839d9f1a156b07fb0c48242e9a2 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 12:52:38 +0200 Subject: [PATCH 399/598] fix: add comments --- zenoh/src/api/liveliness.rs | 2 ++ zenoh/src/api/publisher.rs | 5 ++++- zenoh/src/api/queryable.rs | 2 ++ zenoh/src/api/session.rs | 2 ++ zenoh/src/api/subscriber.rs | 2 ++ 5 files changed, 12 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5d33898f6f..54f74b5e49 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -314,6 +314,8 @@ pub struct LivelinessToken<'a> { #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[zenoh_macros::unstable] pub struct LivelinessTokenUndeclaration<'a> { + // ManuallyDrop wrapper prevents the drop code to be executed, + // which would lead to a double undeclaration token: ManuallyDrop>, } diff 
--git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index cfce42d6ed..9632d29cb6 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -23,7 +23,6 @@ use std::{ use futures::Sink; use zenoh_core::{zread, Resolvable, Resolve, Wait}; -use zenoh_keyexpr::keyexpr; use zenoh_protocol::{ core::CongestionControl, network::{push::ext, Push}, @@ -464,6 +463,8 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct PublisherUndeclaration<'a> { + // ManuallyDrop wrapper prevents the drop code to be executed, + // which would lead to a double undeclaration publisher: ManuallyDrop>, } @@ -1031,6 +1032,8 @@ impl std::ops::DerefMut for MatchingListener<'_, Receiver> { #[zenoh_macros::unstable] pub struct MatchingListenerUndeclaration<'a> { + // ManuallyDrop wrapper prevents the drop code to be executed, + // which would lead to a double undeclaration subscriber: ManuallyDrop>, } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e3b9f30ba8..a9011af217 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -637,6 +637,8 @@ impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct QueryableUndeclaration<'a> { + // ManuallyDrop wrapper prevents the drop code to be executed, + // which would lead to a double undeclaration queryable: ManuallyDrop>, } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index eb51817dfb..106ee2b41d 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -530,6 +530,8 @@ impl Session { /// # } /// ``` pub fn close(self) -> impl Resolve> { + // ManuallyDrop wrapper prevents the drop code to be executed, + // which 
would lead to a double close let session = ManuallyDrop::new(self); ResolveFuture::new(async move { trace!("close()"); diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 83acd4c219..8e7f086f22 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -135,6 +135,8 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct SubscriberUndeclaration<'a> { + // ManuallyDrop wrapper prevents the drop code to be executed, + // which would lead to a double undeclaration subscriber: ManuallyDrop>, } From 3d4375ed21bfc58143f1c4347655ebbc547eb412 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 13:37:29 +0200 Subject: [PATCH 400/598] fix: fix session clones being dropped --- zenoh/src/api/admin.rs | 8 ++++---- zenoh/src/api/session.rs | 24 +++++++++++++++++------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 6e7605e95b..b3142199b0 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -30,7 +30,7 @@ use super::{ key_expr::KeyExpr, queryable::Query, sample::{DataInfo, Locality, SampleKind}, - session::Session, + session::{Session, SessionClone}, }; macro_rules! 
ke_for_sure { @@ -121,11 +121,11 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { #[derive(Clone)] pub(crate) struct Handler { - pub(crate) session: Arc, + pub(crate) session: Arc, } impl Handler { - pub(crate) fn new(session: Session) -> Self { + pub(crate) fn new(session: SessionClone) -> Self { Self { session: Arc::new(session), } @@ -193,7 +193,7 @@ impl TransportMulticastEventHandler for Handler { pub(crate) struct PeerHandler { pub(crate) expr: WireExpr<'static>, - pub(crate) session: Arc, + pub(crate) session: Arc, } impl TransportPeerEventHandler for PeerHandler { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 106ee2b41d..338cfbf221 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -531,8 +531,8 @@ impl Session { /// ``` pub fn close(self) -> impl Resolve> { // ManuallyDrop wrapper prevents the drop code to be executed, - // which would lead to a double close let session = ManuallyDrop::new(self); + // which would lead to a double close ResolveFuture::new(async move { trace!("close()"); session @@ -827,15 +827,25 @@ impl Session { } } +/// Like a [`Session`], but not closed on drop +pub(crate) struct SessionClone(ManuallyDrop); + +impl Deref for SessionClone { + type Target = Session; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + impl Session { - pub(crate) fn clone(&self) -> Self { - Session { + pub(crate) fn clone(&self) -> SessionClone { + SessionClone(ManuallyDrop::new(Session { runtime: self.runtime.clone(), state: self.state.clone(), id: self.id, owns_runtime: self.owns_runtime, task_controller: self.task_controller.clone(), - } + })) } #[allow(clippy::new_ret_no_self)] @@ -2030,7 +2040,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } -impl Primitives for Session { +impl Primitives for SessionClone { fn send_interest(&self, msg: zenoh_protocol::network::Interest) { trace!("recv Interest {} {:?}", msg.id, msg.wire_expr); } @@ -2474,7 +2484,7 @@ impl Primitives for 
Session { impl Drop for Session { fn drop(&mut self) { - let _ = self.clone().close().wait(); + let _ = ManuallyDrop::into_inner(self.clone().0).close().wait(); } } @@ -2637,7 +2647,7 @@ pub trait SessionDeclarations<'s, 'a> { fn info(&'s self) -> SessionInfo<'a>; } -impl crate::net::primitives::EPrimitives for Session { +impl crate::net::primitives::EPrimitives for SessionClone { #[inline] fn send_interest(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_interest(ctx.msg) From f101b44543fcf98218aa8479fb3ef1495902a98a Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 13:40:33 +0200 Subject: [PATCH 401/598] fix: add comment --- zenoh/src/api/session.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 338cfbf221..a9c59b0742 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2484,6 +2484,7 @@ impl Primitives for SessionClone { impl Drop for Session { fn drop(&mut self) { + // Use clone inner session, as it will be rewrapped in ManuallyDrop inside Session::close let _ = ManuallyDrop::into_inner(self.clone().0).close().wait(); } } From 631e7dddc44fb77bec42794ce62926b233204157 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 12:32:06 +0200 Subject: [PATCH 402/598] feat: add unstable `background` method to subscriber/queryable/matching listeners The only real change of this PR is to undeclare objects at session closing/publisher undeclaration. Calling `background` is then only a `mem::forget`. 
--- zenoh/src/api/builders/publisher.rs | 2 + zenoh/src/api/publisher.rs | 74 ++++++++++++++++++++++------- zenoh/src/api/queryable.rs | 7 +++ zenoh/src/api/session.rs | 20 +++++++- zenoh/src/api/subscriber.rs | 7 +++ 5 files changed, 91 insertions(+), 19 deletions(-) diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 950cd946b3..91100829e5 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -320,6 +320,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { priority: self.priority, is_express: self.is_express, destination: self.destination, + matching_listeners: Default::default(), }) } } @@ -371,6 +372,7 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { priority: self.priority, is_express: self.is_express, destination: self.destination, + matching_listeners: Default::default(), }) } } diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 9632d29cb6..5b60ec47e3 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -13,11 +13,13 @@ // use std::{ + collections::HashSet, convert::TryFrom, fmt, future::{IntoFuture, Ready}, mem::ManuallyDrop, pin::Pin, + sync::{Arc, Mutex}, task::{Context, Poll}, }; @@ -134,6 +136,7 @@ pub struct Publisher<'a> { pub(crate) priority: Priority, pub(crate) is_express: bool, pub(crate) destination: Locality, + pub(crate) matching_listeners: Arc>>, } impl<'a> Publisher<'a> { @@ -160,28 +163,33 @@ impl<'a> Publisher<'a> { } } + #[inline] pub fn key_expr(&self) -> &KeyExpr<'a> { &self.key_expr } + #[inline] + /// Get the `congestion_control` applied when routing the data. + pub fn congestion_control(&self) -> CongestionControl { + self.congestion_control + } + /// Change the `congestion_control` to apply when routing the data. #[inline] pub fn set_congestion_control(&mut self, congestion_control: CongestionControl) { self.congestion_control = congestion_control; } - /// Change the priority of the written data. 
+ /// Get the priority of the written data. #[inline] - pub fn set_priority(&mut self, priority: Priority) { - self.priority = priority; + pub fn priority(&self) -> Priority { + self.priority } - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] + /// Change the priority of the written data. #[inline] - pub fn set_allowed_destination(&mut self, destination: Locality) { - self.destination = destination; + pub fn set_priority(&mut self, priority: Priority) { + self.priority = priority; } /// Consumes the given `Publisher`, returning a thread-safe reference-counting @@ -330,6 +338,7 @@ impl<'a> Publisher<'a> { pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Borrow(self), + background: false, handler: DefaultHandler::default(), } } @@ -350,6 +359,13 @@ impl<'a> Publisher<'a> { pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) } + + fn undeclare_matching_listeners(&self) -> ZResult<()> { + for id in zlock!(self.matching_listeners).drain() { + self.session.undeclare_matches_listener_inner(id)? + } + Ok(()) + } } /// Functions to create zenoh entities with `'static` lifetime. 
@@ -435,6 +451,7 @@ impl PublisherDeclarations for std::sync::Arc> { fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Shared(self.clone()), + background: false, handler: DefaultHandler::default(), } } @@ -474,6 +491,7 @@ impl Resolvable for PublisherUndeclaration<'_> { impl Wait for PublisherUndeclaration<'_> { fn wait(self) -> ::To { + self.publisher.undeclare_matching_listeners()?; self.publisher .session .undeclare_publisher_inner(self.publisher.id) @@ -491,6 +509,7 @@ impl IntoFuture for PublisherUndeclaration<'_> { impl Drop for Publisher<'_> { fn drop(&mut self) { + let _ = self.undeclare_matching_listeners(); let _ = self.session.undeclare_publisher_inner(self.id); } } @@ -756,6 +775,7 @@ impl MatchingStatus { #[derive(Debug)] pub struct MatchingListenerBuilder<'a, Handler> { pub(crate) publisher: PublisherRef<'a>, + pub(crate) background: bool, pub handler: Handler, } @@ -792,10 +812,12 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { { let MatchingListenerBuilder { publisher, + background, handler: _, } = self; MatchingListenerBuilder { publisher, + background, handler: callback, } } @@ -862,9 +884,14 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { { let MatchingListenerBuilder { publisher, + background, handler: _, } = self; - MatchingListenerBuilder { publisher, handler } + MatchingListenerBuilder { + publisher, + background, + handler, + } } } @@ -886,16 +913,18 @@ where #[zenoh_macros::unstable] fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); - self.publisher + let state = self + .publisher .session - .declare_matches_listener_inner(&self.publisher, callback) - .map(|listener_state| MatchingListener { - listener: MatchingListenerInner { - publisher: self.publisher, - state: listener_state, - }, - receiver, - }) + .declare_matches_listener_inner(&self.publisher, callback)?; + 
zlock!(self.publisher.matching_listeners).insert(state.id); + Ok(MatchingListener { + listener: MatchingListenerInner { + publisher: self.publisher, + state, + }, + receiver, + }) } } @@ -1006,6 +1035,13 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { pub fn undeclare(self) -> MatchingListenerUndeclaration<'a> { self.listener.undeclare() } + + /// Make the matching listener run in background, until the publisher is undeclared. + #[inline] + #[zenoh_macros::unstable] + pub fn background(self) { + std::mem::forget(self); + } } #[zenoh_macros::unstable] @@ -1045,6 +1081,7 @@ impl Resolvable for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Wait for MatchingListenerUndeclaration<'_> { fn wait(self) -> ::To { + zlock!(self.subscriber.publisher.matching_listeners).remove(&self.subscriber.state.id); self.subscriber .publisher .session @@ -1065,6 +1102,7 @@ impl IntoFuture for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Drop for MatchingListenerInner<'_> { fn drop(&mut self) { + zlock!(self.publisher.matching_listeners).remove(&self.state.id); let _ = self .publisher .session diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index a9011af217..085e52aa21 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -896,6 +896,13 @@ impl<'a, Handler> Queryable<'a, Handler> { pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) } + + /// Make the queryable run in background, until the session is closed. 
+ #[inline] + #[zenoh_macros::unstable] + pub fn background(self) { + std::mem::forget(self); + } } impl<'a, T> Undeclarable<(), QueryableUndeclaration<'a>> for Queryable<'a, T> { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a9c59b0742..642b244578 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -535,6 +535,24 @@ impl Session { // which would lead to a double close ResolveFuture::new(async move { trace!("close()"); + let mut publishers = Vec::new(); + let mut queryables = Vec::new(); + let mut subscribers = Vec::new(); + { + let state = zread!(session.state); + publishers.extend(state.publishers.keys()); + queryables.extend(state.queryables.keys()); + subscribers.extend(state.subscribers.keys()); + } + for id in publishers { + session.undeclare_publisher_inner(id)?; + } + for id in queryables { + session.close_queryable(id)?; + } + for id in subscribers { + session.undeclare_subscriber_inner(id)?; + } session .task_controller .terminate_all(Duration::from_secs(10)); @@ -542,7 +560,7 @@ impl Session { session.runtime.close().await?; } let mut state = zwrite!(session.state); - // clean up to break cyclic references from self.state to itself + // clean up to break cyclic references from session.state to itself let primitives = state.primitives.take(); state.queryables.clear(); drop(state); diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 8e7f086f22..d51e98b25f 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -505,6 +505,13 @@ impl<'a, Handler> Subscriber<'a, Handler> { pub fn undeclare(self) -> SubscriberUndeclaration<'a> { self.subscriber.undeclare() } + + /// Make the subscriber run in background, until the session is closed. 
+ #[inline] + #[zenoh_macros::unstable] + pub fn background(self) { + std::mem::forget(self); + } } impl<'a, T> Undeclarable<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> { From 2720fe80d48be6aea4134072c5296d2a86b16b37 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 13:15:53 +0200 Subject: [PATCH 403/598] fix: remove dead code --- zenoh/src/api/publisher.rs | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 5b60ec47e3..26f37d1d97 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -338,7 +338,6 @@ impl<'a> Publisher<'a> { pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Borrow(self), - background: false, handler: DefaultHandler::default(), } } @@ -451,7 +450,6 @@ impl PublisherDeclarations for std::sync::Arc> { fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Shared(self.clone()), - background: false, handler: DefaultHandler::default(), } } @@ -775,7 +773,6 @@ impl MatchingStatus { #[derive(Debug)] pub struct MatchingListenerBuilder<'a, Handler> { pub(crate) publisher: PublisherRef<'a>, - pub(crate) background: bool, pub handler: Handler, } @@ -812,12 +809,10 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { { let MatchingListenerBuilder { publisher, - background, handler: _, } = self; MatchingListenerBuilder { publisher, - background, handler: callback, } } @@ -884,14 +879,9 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { { let MatchingListenerBuilder { publisher, - background, handler: _, } = self; - MatchingListenerBuilder { - publisher, - background, - handler, - } + MatchingListenerBuilder { publisher, handler } } } From 6e29422b80d254873fbf94ab38103172d89229e6 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 15:46:16 
+0200 Subject: [PATCH 404/598] fix: fix implementation --- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/publisher.rs | 18 +++++++++++------- zenoh/src/api/queryable.rs | 10 +++++++--- zenoh/src/api/subscriber.rs | 10 +++++++--- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 54f74b5e49..7dab2446dc 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -553,6 +553,7 @@ where subscriber: SubscriberInner { session, state: sub_state, + background: false, }, handler, }) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 26f37d1d97..0cc0733bd1 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -912,6 +912,7 @@ where listener: MatchingListenerInner { publisher: self.publisher, state, + background: false, }, receiver, }) @@ -956,6 +957,7 @@ impl std::fmt::Debug for MatchingListenerState { pub(crate) struct MatchingListenerInner<'a> { pub(crate) publisher: PublisherRef<'a>, pub(crate) state: std::sync::Arc, + background: bool, } #[zenoh_macros::unstable] @@ -1029,8 +1031,8 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { /// Make the matching listener run in background, until the publisher is undeclared. 
#[inline] #[zenoh_macros::unstable] - pub fn background(self) { - std::mem::forget(self); + pub fn background(mut self) { + self.listener.background = true; } } @@ -1092,11 +1094,13 @@ impl IntoFuture for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Drop for MatchingListenerInner<'_> { fn drop(&mut self) { - zlock!(self.publisher.matching_listeners).remove(&self.state.id); - let _ = self - .publisher - .session - .undeclare_matches_listener_inner(self.state.id); + if !self.background { + zlock!(self.publisher.matching_listeners).remove(&self.state.id); + let _ = self + .publisher + .session + .undeclare_matches_listener_inner(self.state.id); + } } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 085e52aa21..f766f67188 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -612,6 +612,7 @@ impl fmt::Debug for QueryableState { pub(crate) struct CallbackQueryable<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, + background: bool, } impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> { @@ -665,7 +666,9 @@ impl<'a> IntoFuture for QueryableUndeclaration<'a> { impl Drop for CallbackQueryable<'_> { fn drop(&mut self) { - let _ = self.session.close_queryable(self.state.id); + if !self.background { + let _ = self.session.close_queryable(self.state.id); + } } } @@ -900,8 +903,8 @@ impl<'a, Handler> Queryable<'a, Handler> { /// Make the queryable run in background, until the session is closed. 
#[inline] #[zenoh_macros::unstable] - pub fn background(self) { - std::mem::forget(self); + pub fn background(mut self) { + self.queryable.background = true; } } @@ -952,6 +955,7 @@ where queryable: CallbackQueryable { session, state: qable_state, + background: false, }, handler: receiver, }) diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index d51e98b25f..097df3feb9 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -79,6 +79,7 @@ impl fmt::Debug for SubscriberState { pub(crate) struct SubscriberInner<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, + pub(crate) background: bool, } impl<'a> SubscriberInner<'a> { @@ -163,7 +164,9 @@ impl IntoFuture for SubscriberUndeclaration<'_> { impl Drop for SubscriberInner<'_> { fn drop(&mut self) { - let _ = self.session.undeclare_subscriber_inner(self.state.id); + if !self.background { + let _ = self.session.undeclare_subscriber_inner(self.state.id); + } } } @@ -388,6 +391,7 @@ where subscriber: SubscriberInner { session, state: sub_state, + background: false, }, handler: receiver, }) @@ -509,8 +513,8 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// Make the subscriber run in background, until the session is closed. 
#[inline] #[zenoh_macros::unstable] - pub fn background(self) { - std::mem::forget(self); + pub fn background(mut self) { + self.subscriber.background = false; } } From ad6c4147f6ad7181ffdf3647a1182c3ce506766f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 10 Jun 2024 16:35:30 +0200 Subject: [PATCH 405/598] hello wrapped, getters added (#1109) * hello wrapped, getters added * cargo fmt --- commons/zenoh-protocol/src/scouting/hello.rs | 11 ------- zenoh/src/api/scouting.rs | 34 ++++++++++++++++++-- zenoh/src/lib.rs | 5 +-- 3 files changed, 33 insertions(+), 17 deletions(-) diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 62ea915e5a..6639792976 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use alloc::vec::Vec; -use core::fmt; use crate::core::{Locator, WhatAmI, ZenohId}; @@ -107,16 +106,6 @@ pub struct Hello { pub locators: Vec, } -impl fmt::Display for Hello { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Hello") - .field("zid", &self.zid) - .field("whatami", &self.whatami) - .field("locators", &self.locators) - .finish() - } -} - impl Hello { #[cfg(feature = "test")] pub fn rand() -> Self { diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 566f18f061..e16f31da2e 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -21,7 +21,7 @@ use std::{ use tokio::net::UdpSocket; use zenoh_core::{Resolvable, Wait}; -use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; +use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::ZResult; use zenoh_task::TerminatableTask; @@ -30,6 +30,36 @@ use crate::{ net::runtime::{orchestrator::Loop, Runtime}, }; +/// A zenoh Hello message. +pub struct Hello(zenoh_protocol::scouting::Hello); + +impl Hello { + /// Get the locators of this Hello message. 
+ pub fn locators(&self) -> &[zenoh_protocol::core::Locator] { + &self.0.locators + } + + /// Get the zenoh id of this Hello message. + pub fn zid(&self) -> zenoh_protocol::core::ZenohId { + self.0.zid + } + + /// Get the whatami of this Hello message. + pub fn whatami(&self) -> zenoh_protocol::core::WhatAmI { + self.0.whatami + } +} + +impl fmt::Display for Hello { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Hello") + .field("zid", &self.zid()) + .field("whatami", &self.whatami()) + .field("locators", &self.locators()) + .finish() + } +} + /// A builder for initializing a [`Scout`]. /// /// # Examples @@ -324,7 +354,7 @@ fn _scout( let scout = Runtime::scout(&sockets, what, &addr, move |hello| { let callback = callback.clone(); async move { - callback(hello); + callback(Hello(hello)); Loop::Continue } }); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c1cb39fdee..c64bdeb138 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -324,10 +324,7 @@ pub mod handlers { /// Scouting primitives pub mod scouting { - /// A zenoh Hello message. 
- pub use zenoh_protocol::scouting::Hello; - - pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; + pub use crate::api::scouting::{scout, Hello, Scout, ScoutBuilder}; } /// Liveliness primitives From 94bd2e96f3c56a446e3715e792ac3fc0c270319f Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 10 Jun 2024 16:46:40 +0200 Subject: [PATCH 406/598] fix: rework everything --- zenoh/src/api/admin.rs | 8 ++-- zenoh/src/api/builders/publisher.rs | 2 + zenoh/src/api/liveliness.rs | 31 +++++++++----- zenoh/src/api/publisher.rs | 41 +++++++++--------- zenoh/src/api/queryable.rs | 24 +++++------ zenoh/src/api/session.rs | 66 +++++++++-------------------- zenoh/src/api/subscriber.rs | 24 +++++------ 7 files changed, 90 insertions(+), 106 deletions(-) diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index b3142199b0..6e7605e95b 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -30,7 +30,7 @@ use super::{ key_expr::KeyExpr, queryable::Query, sample::{DataInfo, Locality, SampleKind}, - session::{Session, SessionClone}, + session::Session, }; macro_rules! 
ke_for_sure { @@ -121,11 +121,11 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { #[derive(Clone)] pub(crate) struct Handler { - pub(crate) session: Arc, + pub(crate) session: Arc, } impl Handler { - pub(crate) fn new(session: SessionClone) -> Self { + pub(crate) fn new(session: Session) -> Self { Self { session: Arc::new(session), } @@ -193,7 +193,7 @@ impl TransportMulticastEventHandler for Handler { pub(crate) struct PeerHandler { pub(crate) expr: WireExpr<'static>, - pub(crate) session: Arc, + pub(crate) session: Arc, } impl TransportPeerEventHandler for PeerHandler { diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 91100829e5..32eea81eaa 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -321,6 +321,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { is_express: self.is_express, destination: self.destination, matching_listeners: Default::default(), + undeclare_on_drop: true, }) } } @@ -373,6 +374,7 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { is_express: self.is_express, destination: self.destination, matching_listeners: Default::default(), + undeclare_on_drop: true, }) } } diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 7dab2446dc..7cd640d7e4 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,7 +15,6 @@ use std::{ convert::TryInto, future::{IntoFuture, Ready}, - mem::ManuallyDrop, sync::Arc, time::Duration, }; @@ -236,6 +235,7 @@ impl Wait for LivelinessTokenBuilder<'_, '_> { .map(|tok_state| LivelinessToken { session, state: tok_state, + undeclare_on_drop: true, }) } } @@ -291,6 +291,7 @@ pub(crate) struct LivelinessTokenState { pub struct LivelinessToken<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, + undeclare_on_drop: bool, } /// A [`Resolvable`] returned when undeclaring a [`LivelinessToken`](LivelinessToken). 
@@ -314,9 +315,7 @@ pub struct LivelinessToken<'a> { #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[zenoh_macros::unstable] pub struct LivelinessTokenUndeclaration<'a> { - // ManuallyDrop wrapper prevents the drop code to be executed, - // which would lead to a double undeclaration - token: ManuallyDrop>, + token: LivelinessToken<'a>, } #[zenoh_macros::unstable] @@ -326,7 +325,9 @@ impl Resolvable for LivelinessTokenUndeclaration<'_> { #[zenoh_macros::unstable] impl Wait for LivelinessTokenUndeclaration<'_> { - fn wait(self) -> ::To { + fn wait(mut self) -> ::To { + // set the flag first to avoid double panic if this function panic + self.token.undeclare_on_drop = false; self.token.session.undeclare_liveliness(self.token.state.id) } } @@ -369,21 +370,31 @@ impl<'a> LivelinessToken<'a> { pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) } + + /// Keep this liveliness token in background, until the session is closed. + #[inline] + #[zenoh_macros::unstable] + pub fn background(mut self) { + // It's not necessary to undeclare this resource when session close, as other sessions + // will clean all resources related to the closed one. + // So we can just never undeclare it. 
+ self.undeclare_on_drop = false; + } } #[zenoh_macros::unstable] impl<'a> Undeclarable<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> { fn undeclare_inner(self, _: ()) -> LivelinessTokenUndeclaration<'a> { - LivelinessTokenUndeclaration { - token: ManuallyDrop::new(self), - } + LivelinessTokenUndeclaration { token: self } } } #[zenoh_macros::unstable] impl Drop for LivelinessToken<'_> { fn drop(&mut self) { - let _ = self.session.undeclare_liveliness(self.state.id); + if self.undeclare_on_drop { + let _ = self.session.undeclare_liveliness(self.state.id); + } } } @@ -553,7 +564,7 @@ where subscriber: SubscriberInner { session, state: sub_state, - background: false, + undeclare_on_drop: true, }, handler, }) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 0cc0733bd1..2a0a47047b 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -17,7 +17,6 @@ use std::{ convert::TryFrom, fmt, future::{IntoFuture, Ready}, - mem::ManuallyDrop, pin::Pin, sync::{Arc, Mutex}, task::{Context, Poll}, @@ -137,6 +136,7 @@ pub struct Publisher<'a> { pub(crate) is_express: bool, pub(crate) destination: Locality, pub(crate) matching_listeners: Arc>>, + pub(crate) undeclare_on_drop: bool, } impl<'a> Publisher<'a> { @@ -457,9 +457,7 @@ impl PublisherDeclarations for std::sync::Arc> { impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { fn undeclare_inner(self, _: ()) -> PublisherUndeclaration<'a> { - PublisherUndeclaration { - publisher: ManuallyDrop::new(self), - } + PublisherUndeclaration { publisher: self } } } @@ -478,9 +476,7 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct PublisherUndeclaration<'a> { - // ManuallyDrop wrapper prevents the drop code to be executed, - // which would lead to a double undeclaration - publisher: 
ManuallyDrop>, + publisher: Publisher<'a>, } impl Resolvable for PublisherUndeclaration<'_> { @@ -488,7 +484,9 @@ impl Resolvable for PublisherUndeclaration<'_> { } impl Wait for PublisherUndeclaration<'_> { - fn wait(self) -> ::To { + fn wait(mut self) -> ::To { + // set the flag first to avoid double panic if this function panic + self.publisher.undeclare_on_drop = false; self.publisher.undeclare_matching_listeners()?; self.publisher .session @@ -507,8 +505,10 @@ impl IntoFuture for PublisherUndeclaration<'_> { impl Drop for Publisher<'_> { fn drop(&mut self) { - let _ = self.undeclare_matching_listeners(); - let _ = self.session.undeclare_publisher_inner(self.id); + if self.undeclare_on_drop { + let _ = self.undeclare_matching_listeners(); + let _ = self.session.undeclare_publisher_inner(self.id); + } } } @@ -912,7 +912,7 @@ where listener: MatchingListenerInner { publisher: self.publisher, state, - background: false, + undeclare_on_drop: true, }, receiver, }) @@ -957,7 +957,7 @@ impl std::fmt::Debug for MatchingListenerState { pub(crate) struct MatchingListenerInner<'a> { pub(crate) publisher: PublisherRef<'a>, pub(crate) state: std::sync::Arc, - background: bool, + undeclare_on_drop: bool, } #[zenoh_macros::unstable] @@ -971,9 +971,7 @@ impl<'a> MatchingListenerInner<'a> { #[zenoh_macros::unstable] impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { - MatchingListenerUndeclaration { - subscriber: ManuallyDrop::new(self), - } + MatchingListenerUndeclaration { subscriber: self } } } @@ -1032,7 +1030,8 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { #[inline] #[zenoh_macros::unstable] pub fn background(mut self) { - self.listener.background = true; + // The matching listener will be undeclared as part of publisher undeclaration. 
+ self.listener.undeclare_on_drop = false; } } @@ -1060,9 +1059,7 @@ impl std::ops::DerefMut for MatchingListener<'_, Receiver> { #[zenoh_macros::unstable] pub struct MatchingListenerUndeclaration<'a> { - // ManuallyDrop wrapper prevents the drop code to be executed, - // which would lead to a double undeclaration - subscriber: ManuallyDrop>, + subscriber: MatchingListenerInner<'a>, } #[zenoh_macros::unstable] @@ -1072,7 +1069,9 @@ impl Resolvable for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Wait for MatchingListenerUndeclaration<'_> { - fn wait(self) -> ::To { + fn wait(mut self) -> ::To { + // set the flag first to avoid double panic if this function panic + self.subscriber.undeclare_on_drop = false; zlock!(self.subscriber.publisher.matching_listeners).remove(&self.subscriber.state.id); self.subscriber .publisher @@ -1094,7 +1093,7 @@ impl IntoFuture for MatchingListenerUndeclaration<'_> { #[zenoh_macros::unstable] impl Drop for MatchingListenerInner<'_> { fn drop(&mut self) { - if !self.background { + if self.undeclare_on_drop { zlock!(self.publisher.matching_listeners).remove(&self.state.id); let _ = self .publisher diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index f766f67188..aef7a93b94 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -14,7 +14,6 @@ use std::{ fmt, future::{IntoFuture, Ready}, - mem::ManuallyDrop, ops::{Deref, DerefMut}, sync::Arc, }; @@ -612,14 +611,12 @@ impl fmt::Debug for QueryableState { pub(crate) struct CallbackQueryable<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, - background: bool, + undeclare_on_drop: bool, } impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> { fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> { - QueryableUndeclaration { - queryable: ManuallyDrop::new(self), - } + QueryableUndeclaration { queryable: self } } } @@ -638,9 +635,7 @@ impl<'a> Undeclarable<(), 
QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct QueryableUndeclaration<'a> { - // ManuallyDrop wrapper prevents the drop code to be executed, - // which would lead to a double undeclaration - queryable: ManuallyDrop>, + queryable: CallbackQueryable<'a>, } impl Resolvable for QueryableUndeclaration<'_> { @@ -648,7 +643,9 @@ impl Resolvable for QueryableUndeclaration<'_> { } impl Wait for QueryableUndeclaration<'_> { - fn wait(self) -> ::To { + fn wait(mut self) -> ::To { + // set the flag first to avoid double panic if this function panic + self.queryable.undeclare_on_drop = false; self.queryable .session .close_queryable(self.queryable.state.id) @@ -666,7 +663,7 @@ impl<'a> IntoFuture for QueryableUndeclaration<'a> { impl Drop for CallbackQueryable<'_> { fn drop(&mut self) { - if !self.background { + if self.undeclare_on_drop { let _ = self.session.close_queryable(self.state.id); } } @@ -904,7 +901,10 @@ impl<'a, Handler> Queryable<'a, Handler> { #[inline] #[zenoh_macros::unstable] pub fn background(mut self) { - self.queryable.background = true; + // It's not necessary to undeclare this resource when session close, as other sessions + // will clean all resources related to the closed one. + // So we can just never undeclare it. 
+ self.queryable.undeclare_on_drop = false; } } @@ -955,7 +955,7 @@ where queryable: CallbackQueryable { session, state: qable_state, - background: false, + undeclare_on_drop: true, }, handler: receiver, }) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 642b244578..66794c0cba 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -16,7 +16,6 @@ use std::{ convert::{TryFrom, TryInto}, fmt, future::{IntoFuture, Ready}, - mem::ManuallyDrop, ops::Deref, sync::{ atomic::{AtomicU16, Ordering}, @@ -405,6 +404,7 @@ pub struct Session { pub(crate) runtime: Runtime, pub(crate) state: Arc>, pub(crate) id: u16, + close_on_drop: bool, owns_runtime: bool, task_controller: TaskController, } @@ -426,6 +426,7 @@ impl Session { runtime: runtime.clone(), state: state.clone(), id: SESSION_ID_COUNTER.fetch_add(1, Ordering::SeqCst), + close_on_drop: true, owns_runtime: false, task_controller: TaskController::default(), }; @@ -529,38 +530,17 @@ impl Session { /// session.close().await.unwrap(); /// # } /// ``` - pub fn close(self) -> impl Resolve> { - // ManuallyDrop wrapper prevents the drop code to be executed, - let session = ManuallyDrop::new(self); - // which would lead to a double close + pub fn close(mut self) -> impl Resolve> { ResolveFuture::new(async move { trace!("close()"); - let mut publishers = Vec::new(); - let mut queryables = Vec::new(); - let mut subscribers = Vec::new(); - { - let state = zread!(session.state); - publishers.extend(state.publishers.keys()); - queryables.extend(state.queryables.keys()); - subscribers.extend(state.subscribers.keys()); - } - for id in publishers { - session.undeclare_publisher_inner(id)?; - } - for id in queryables { - session.close_queryable(id)?; - } - for id in subscribers { - session.undeclare_subscriber_inner(id)?; + // set the flag first to avoid double panic if this function panic + self.close_on_drop = false; + self.task_controller.terminate_all(Duration::from_secs(10)); + if 
self.owns_runtime { + self.runtime.close().await?; } - session - .task_controller - .terminate_all(Duration::from_secs(10)); - if session.owns_runtime { - session.runtime.close().await?; - } - let mut state = zwrite!(session.state); - // clean up to break cyclic references from session.state to itself + let mut state = zwrite!(self.state); + // clean up to break cyclic references from self.state to itself let primitives = state.primitives.take(); state.queryables.clear(); drop(state); @@ -845,25 +825,16 @@ impl Session { } } -/// Like a [`Session`], but not closed on drop -pub(crate) struct SessionClone(ManuallyDrop); - -impl Deref for SessionClone { - type Target = Session; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - impl Session { - pub(crate) fn clone(&self) -> SessionClone { - SessionClone(ManuallyDrop::new(Session { + pub(crate) fn clone(&self) -> Self { + Self { runtime: self.runtime.clone(), state: self.state.clone(), id: self.id, + close_on_drop: false, owns_runtime: self.owns_runtime, task_controller: self.task_controller.clone(), - })) + } } #[allow(clippy::new_ret_no_self)] @@ -2058,7 +2029,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } -impl Primitives for SessionClone { +impl Primitives for Session { fn send_interest(&self, msg: zenoh_protocol::network::Interest) { trace!("recv Interest {} {:?}", msg.id, msg.wire_expr); } @@ -2502,8 +2473,9 @@ impl Primitives for SessionClone { impl Drop for Session { fn drop(&mut self) { - // Use clone inner session, as it will be rewrapped in ManuallyDrop inside Session::close - let _ = ManuallyDrop::into_inner(self.clone().0).close().wait(); + if self.close_on_drop { + let _ = self.clone().close().wait(); + } } } @@ -2666,7 +2638,7 @@ pub trait SessionDeclarations<'s, 'a> { fn info(&'s self) -> SessionInfo<'a>; } -impl crate::net::primitives::EPrimitives for SessionClone { +impl crate::net::primitives::EPrimitives for Session { #[inline] fn send_interest(&self, ctx: 
crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_interest(ctx.msg) diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 097df3feb9..4628f9e95d 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -15,7 +15,6 @@ use std::{ fmt, future::{IntoFuture, Ready}, - mem::ManuallyDrop, ops::{Deref, DerefMut}, sync::Arc, }; @@ -79,7 +78,7 @@ impl fmt::Debug for SubscriberState { pub(crate) struct SubscriberInner<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, - pub(crate) background: bool, + pub(crate) undeclare_on_drop: bool, } impl<'a> SubscriberInner<'a> { @@ -112,9 +111,7 @@ impl<'a> SubscriberInner<'a> { impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> { - SubscriberUndeclaration { - subscriber: ManuallyDrop::new(self), - } + SubscriberUndeclaration { subscriber: self } } } @@ -136,9 +133,7 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct SubscriberUndeclaration<'a> { - // ManuallyDrop wrapper prevents the drop code to be executed, - // which would lead to a double undeclaration - subscriber: ManuallyDrop>, + subscriber: SubscriberInner<'a>, } impl Resolvable for SubscriberUndeclaration<'_> { @@ -146,7 +141,9 @@ impl Resolvable for SubscriberUndeclaration<'_> { } impl Wait for SubscriberUndeclaration<'_> { - fn wait(self) -> ::To { + fn wait(mut self) -> ::To { + // set the flag first to avoid double panic if this function panic + self.subscriber.undeclare_on_drop = false; self.subscriber .session .undeclare_subscriber_inner(self.subscriber.state.id) @@ -164,7 +161,7 @@ impl IntoFuture for SubscriberUndeclaration<'_> { impl Drop for SubscriberInner<'_> { fn drop(&mut self) { - if !self.background { + if 
self.undeclare_on_drop { let _ = self.session.undeclare_subscriber_inner(self.state.id); } } @@ -391,7 +388,7 @@ where subscriber: SubscriberInner { session, state: sub_state, - background: false, + undeclare_on_drop: true, }, handler: receiver, }) @@ -514,7 +511,10 @@ impl<'a, Handler> Subscriber<'a, Handler> { #[inline] #[zenoh_macros::unstable] pub fn background(mut self) { - self.subscriber.background = false; + // It's not necessary to undeclare this resource when session close, as other sessions + // will clean all resources related to the closed one. + // So we can just never undeclare it. + self.subscriber.undeclare_on_drop = false; } } From 288333af9d1e3b2274be7b1e222b1bc5d09fe32a Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Mon, 10 Jun 2024 18:25:41 +0300 Subject: [PATCH 407/598] fix forgotten QoS and Timestamp ext processing for query/reply (#1108) * fix forgotten QoS and Timestamp ext processing for query/reply #1107 * fix priority bitfield managing --- commons/zenoh-protocol/src/common/mod.rs | 6 ++++++ commons/zenoh-protocol/src/network/mod.rs | 2 +- zenoh/src/net/routing/dispatcher/face.rs | 4 ++++ zenoh/src/net/routing/dispatcher/queries.rs | 15 +++++++++++---- 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-protocol/src/common/mod.rs b/commons/zenoh-protocol/src/common/mod.rs index ef53e5a8ac..99bc471cfd 100644 --- a/commons/zenoh-protocol/src/common/mod.rs +++ b/commons/zenoh-protocol/src/common/mod.rs @@ -46,6 +46,12 @@ pub mod imsg { byte } + pub const fn set_bitfield(mut byte: u8, value: u8, mask: u8) -> u8 { + byte = unset_flag(byte, mask); + byte |= value; + byte + } + pub const fn has_option(options: u64, flag: u64) -> bool { options & flag != 0 } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 952fe74e89..371f3eda78 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ 
b/commons/zenoh-protocol/src/network/mod.rs @@ -274,7 +274,7 @@ pub mod ext { } pub fn set_priority(&mut self, priority: Priority) { - self.inner = imsg::set_flag(self.inner, priority as u8); + self.inner = imsg::set_bitfield(self.inner, priority as u8, Self::P_MASK); } pub const fn get_priority(&self) -> Priority { diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 6eb9ee5b90..b21253d55f 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -370,6 +370,8 @@ impl Primitives for Face { &self.state, &msg.wire_expr, msg.id, + msg.ext_qos, + msg.ext_tstamp, msg.ext_target, msg.ext_budget, msg.ext_timeout, @@ -385,6 +387,8 @@ impl Primitives for Face { &self.tables, &mut self.state.clone(), msg.rid, + msg.ext_qos, + msg.ext_tstamp, msg.ext_respid, msg.wire_expr, msg.payload, diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index c557e3da50..240ddb3a7d 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -473,6 +473,8 @@ impl Timed for QueryCleanup { &self.tables, &mut face, self.qid, + response::ext::QoSType::RESPONSE, + None, ext_respid, WireExpr::empty(), ResponseBody::Err(zenoh::Err { @@ -609,6 +611,8 @@ pub fn route_query( face: &Arc, expr: &WireExpr, qid: RequestId, + ext_qos: ext::QoSType, + ext_tstamp: Option, ext_target: TargetType, ext_budget: Option, ext_timeout: Option, @@ -733,8 +737,8 @@ pub fn route_query( Request { id: *qid, wire_expr: key_expr.into(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, + ext_qos, + ext_tstamp, ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target, ext_budget, @@ -781,10 +785,13 @@ pub fn route_query( } } +#[allow(clippy::too_many_arguments)] pub(crate) fn route_send_response( tables_ref: &Arc, face: &mut Arc, qid: RequestId, + ext_qos: ext::QoSType, + ext_tstamp: Option, ext_respid: Option, key_expr: 
WireExpr, body: ResponseBody, @@ -819,8 +826,8 @@ pub(crate) fn route_send_response( rid: query.src_qid, wire_expr: key_expr.to_owned(), payload: body, - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, + ext_qos, + ext_tstamp, ext_respid, }, "".to_string(), // @TODO provide the proper key expression of the response for interceptors From 13ed78eab837e5f9d0b6984fd5cc331c01e4f97f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 10 Jun 2024 18:05:50 +0200 Subject: [PATCH 408/598] Remove StringOrBase64 (#1112) * Remove StringOrBase64 * Fix cargo fmt * Use serde_json::from_reader * Align error text * Fix cargo clippy --- plugins/zenoh-plugin-rest/src/lib.rs | 6 +-- .../src/replica/align_queryable.rs | 12 +++-- .../src/replica/aligner.rs | 19 ++++--- .../src/replica/mod.rs | 29 ++++++----- .../tests/operations.rs | 12 ++--- .../tests/wildcard.rs | 28 ++++++++--- zenoh/src/api/bytes.rs | 49 +------------------ zenoh/src/lib.rs | 4 +- 8 files changed, 65 insertions(+), 94 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 4f0ca3f67d..ee66ae7dbb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -26,7 +26,7 @@ use http_types::Method; use serde::{Deserialize, Serialize}; use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; use zenoh::{ - bytes::{StringOrBase64, ZBytes}, + bytes::ZBytes, encoding::Encoding, internal::{ plugins::{RunningPluginTrait, ZenohPlugin}, @@ -76,11 +76,11 @@ fn payload_to_json(payload: &ZBytes, encoding: &Encoding) -> serde_json::Value { payload .deserialize::() .unwrap_or_else(|_| { - serde_json::Value::String(StringOrBase64::from(payload).into_string()) + serde_json::Value::String(base64_encode(&Cow::from(payload))) }) } // otherwise convert to JSON string - _ => serde_json::Value::String(StringOrBase64::from(payload).into_string()), + _ => serde_json::Value::String(base64_encode(&Cow::from(payload))), } } 
} diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 66233d2535..1dc5d438c8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{ + borrow::Cow, cmp::Ordering, collections::{BTreeSet, HashMap, HashSet}, str, @@ -20,8 +21,8 @@ use std::{ use async_std::sync::Arc; use zenoh::{ - bytes::StringOrBase64, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Selector, - time::Timestamp, value::Value, Session, + key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Selector, time::Timestamp, + value::Value, Session, }; use super::{digest::*, Snapshotter}; @@ -234,8 +235,11 @@ impl AlignQueryable { tracing::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}' @ {:?})", sample.key_expr().as_str(), - StringOrBase64::from(sample.payload()), - sample.timestamp() + sample + .payload() + .deserialize::>() + .unwrap_or(Cow::Borrowed("")), + sample.timestamp(), ); if let Some(timestamp) = sample.timestamp() { match timestamp.cmp(&logentry.timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index eaecee5246..fd4b5460a7 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -13,6 +13,7 @@ // use std::{ + borrow::Cow, collections::{HashMap, HashSet}, str, }; @@ -20,7 +21,6 @@ use std::{ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use zenoh::{ - bytes::StringOrBase64, key_expr::{KeyExpr, OwnedKeyExpr}, prelude::*, sample::{Sample, SampleBuilder}, @@ -216,7 +216,7 @@ impl Aligner { let mut other_intervals: HashMap = HashMap::new(); // expecting sample.payload to be a vec of intervals with their checksum for each 
in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload())) { + match serde_json::from_reader(each.payload().reader()) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -224,7 +224,7 @@ impl Aligner { tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } - }; + } } (other_intervals, no_err) } else { @@ -262,7 +262,7 @@ impl Aligner { let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload())) { + match serde_json::from_reader(each.payload().reader()) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -270,7 +270,7 @@ impl Aligner { tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } - }; + } } (other_subintervals, no_err) }; @@ -303,7 +303,7 @@ impl Aligner { let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload())) { + match serde_json::from_reader(each.payload().reader()) { Ok((i, c)) => { other_content.insert(i, c); } @@ -311,7 +311,7 @@ impl Aligner { tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } - }; + } } // get subintervals diff let result = this.get_full_content_diff(other_content); @@ -343,7 +343,10 @@ impl Aligner { tracing::trace!( "[ALIGNER] Received ('{}': '{}')", sample.key_expr().as_str(), - StringOrBase64::from(sample.payload()) + sample + .payload() + .deserialize::>() + .unwrap_or(Cow::Borrowed("")) ); return_val.push(sample); } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 0e4ffbd70a..114e5c206b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -16,8 +16,7 @@ use std::{ collections::{HashMap, HashSet}, - str, - str::FromStr, + str::{self, FromStr}, time::{Duration, SystemTime}, }; @@ -44,9 +43,7 @@ pub use aligner::Aligner; pub use digest::{Digest, DigestConfig, EraType, LogEntry}; pub use snapshotter::Snapshotter; pub use storage::{ReplicationService, StorageService}; -use zenoh::{ - bytes::StringOrBase64, key_expr::OwnedKeyExpr, sample::Locality, time::Timestamp, Session, -}; +use zenoh::{key_expr::OwnedKeyExpr, sample::Locality, time::Timestamp, Session}; const ERA: &str = "era"; const INTERVALS: &str = "intervals"; @@ -227,21 +224,23 @@ impl Replica { }; let from = &sample.key_expr().as_str() [Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; - tracing::trace!( - "[DIGEST_SUB] From {} Received {} ('{}': '{}')", - from, - sample.kind(), - sample.key_expr().as_str(), - StringOrBase64::from(sample.payload()) - ); - let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload())) - { + + let digest: Digest = match serde_json::from_reader(sample.payload().reader()) { Ok(digest) => digest, Err(e) => { tracing::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); continue; } }; + + tracing::trace!( + "[DIGEST_SUB] From {} Received {} ('{}': '{:?}')", + from, + sample.kind(), + sample.key_expr().as_str(), + digest, + ); + let ts = digest.timestamp; let to_be_processed = self .processing_needed( @@ -260,7 +259,7 @@ impl Replica { tracing::error!("[DIGEST_SUB] Error sending digest to aligner: {}", e) } } - }; + } received.insert(from.to_string(), ts); } } diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index c6c473d77b..505634e6fb 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -16,12 +16,12 @@ // 1. 
normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. check for dealing with out of order updates -use std::{str::FromStr, thread::sleep}; +use std::{borrow::Cow, str::FromStr, thread::sleep}; use async_std::task; use zenoh::{ - bytes::StringOrBase64, internal::zasync_executor_init, prelude::*, query::Reply, - sample::Sample, time::Timestamp, Config, Session, + internal::zasync_executor_init, prelude::*, query::Reply, sample::Sample, time::Timestamp, + Config, Session, }; use zenoh_plugin_trait::Plugin; @@ -96,7 +96,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/a").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "1"); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "1"); put_data( &session, @@ -112,7 +112,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "2"); delete_data( &session, @@ -131,7 +131,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "2"); assert_eq!(data[0].key_expr().as_str(), "operation/test/b"); drop(storage); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 9b29dba77c..04e4549508 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -16,13 +16,13 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. 
check for dealing with out of order updates -use std::{str::FromStr, thread::sleep}; +use std::{borrow::Cow, str::FromStr, thread::sleep}; // use std::collections::HashMap; use async_std::task; use zenoh::{ - bytes::StringOrBase64, internal::zasync_executor_init, prelude::*, query::Reply, - sample::Sample, time::Timestamp, Config, Session, + internal::zasync_executor_init, prelude::*, query::Reply, sample::Sample, time::Timestamp, + Config, Session, }; use zenoh_plugin_trait::Plugin; @@ -113,7 +113,7 @@ async fn test_wild_card_in_order() { let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); assert_eq!(data[0].key_expr().as_str(), "wild/test/a"); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "2"); put_data( &session, @@ -131,8 +131,20 @@ async fn test_wild_card_in_order() { assert_eq!(data.len(), 2); assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); - assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload()).as_str())); - assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload()).as_str())); + assert!(["2", "3"].contains( + &data[0] + .payload() + .deserialize::>() + .unwrap() + .as_ref() + )); + assert!(["2", "3"].contains( + &data[1] + .payload() + .deserialize::>() + .unwrap() + .as_ref() + )); put_data( &session, @@ -150,8 +162,8 @@ async fn test_wild_card_in_order() { assert_eq!(data.len(), 2); assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "4"); - assert_eq!(StringOrBase64::from(data[1].payload()).as_str(), "4"); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "4"); + assert_eq!(data[1].payload().deserialize::>().unwrap(), "4"); delete_data( 
&session, diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 76ffdc1650..55159e5beb 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -14,7 +14,7 @@ //! ZBytes primitives. use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, str::Utf8Error, string::FromUtf8Error, sync::Arc, }; @@ -1806,53 +1806,6 @@ where } } -// For convenience to always convert a Value in the examples -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum StringOrBase64 { - String(String), - Base64(String), -} - -impl StringOrBase64 { - pub fn into_string(self) -> String { - match self { - StringOrBase64::String(s) | StringOrBase64::Base64(s) => s, - } - } -} - -impl Deref for StringOrBase64 { - type Target = String; - - fn deref(&self) -> &Self::Target { - match self { - Self::String(s) | Self::Base64(s) => s, - } - } -} - -impl std::fmt::Display for StringOrBase64 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(self) - } -} - -impl From<&ZBytes> for StringOrBase64 { - fn from(v: &ZBytes) -> Self { - use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::() { - Ok(s) => StringOrBase64::String(s), - Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.into::>())), - } - } -} - -impl From<&mut ZBytes> for StringOrBase64 { - fn from(v: &mut ZBytes) -> Self { - StringOrBase64::from(&*v) - } -} - // Protocol attachment extension impl From for AttachmentType { fn from(this: ZBytes) -> Self { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c64bdeb138..c8062667ca 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -240,8 +240,8 @@ pub mod encoding { /// Payload primitives pub mod bytes { pub use crate::api::bytes::{ - Deserialize, OptionZBytes, Serialize, StringOrBase64, ZBytes, ZBytesIterator, ZBytesReader, - ZBytesWriter, ZDeserializeError, ZSerde, + 
Deserialize, OptionZBytes, Serialize, ZBytes, ZBytesIterator, ZBytesReader, ZBytesWriter, + ZDeserializeError, ZSerde, }; } From 2f01adca5f7731b23ae91c861dbdb57651126ec4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 10 Jun 2024 20:10:54 +0200 Subject: [PATCH 409/598] reply_key_expr in trait --- plugins/zenoh-plugin-rest/src/lib.rs | 4 +- .../src/replica/storage.rs | 1 - zenoh-ext/src/publication_cache.rs | 4 +- zenoh/src/api/query.rs | 8 +--- zenoh/src/api/queryable.rs | 8 ++-- zenoh/src/api/selector.rs | 42 +++++++++++++------ zenoh/src/api/session.rs | 22 +++++----- zenoh/src/lib.rs | 4 -- zenoh/src/prelude.rs | 1 + 9 files changed, 49 insertions(+), 45 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 072a060d1a..a230aa8748 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::{ key_expr::{keyexpr, KeyExpr}, query::{QueryConsolidation, Reply}, sample::{Sample, SampleKind, ValueBuilderTrait}, - selector::{Parameters, Selector, TIME_RANGE_KEY}, + selector::{Parameters, PredefinedParameters, Selector}, session::{Session, SessionDeclarations}, value::Value, }; @@ -383,7 +383,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result PublicationCache<'a> { if !query.key_expr().as_str().contains('*') { if let Some(queue) = cache.get(query.key_expr().as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { + if let (Some(Ok(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } @@ -230,7 +230,7 @@ impl<'a> PublicationCache<'a> { for (key_expr, queue) in cache.iter() { if query.key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = 
(query.parameters().time_range(), sample.timestamp()) { + if let (Some(Ok(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index ba925876c9..0cc6b1e388 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -35,7 +35,7 @@ use super::{ key_expr::KeyExpr, publisher::Priority, sample::{Locality, QoSBuilder, Sample}, - selector::Selector, + selector::{PredefinedParameters, Selector}, session::Session, value::Value, }; @@ -423,7 +423,7 @@ impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { }| { if accept == ReplyKeyExpr::Any { let mut parameters = parameters.into_owned(); - parameters.insert(_REPLY_KEY_EXPR_ANY_SEL_PARAM, ""); + parameters.set_reply_key_expr_any(); let parameters = Cow::Owned(parameters); Selector { key_expr, @@ -442,10 +442,6 @@ impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { } } -pub(crate) const _REPLY_KEY_EXPR_ANY_SEL_PARAM: &str = "_anyke"; -#[zenoh_macros::unstable] -pub const REPLY_KEY_EXPR_ANY_SEL_PARAM: &str = _REPLY_KEY_EXPR_ANY_SEL_PARAM; - #[zenoh_macros::unstable] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] pub enum ReplyKeyExpr { diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index f3c23146d3..f4f16e8ecf 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -32,6 +32,8 @@ use { zenoh_protocol::core::EntityGlobalId, }; +#[zenoh_macros::unstable] +use super::selector::PredefinedParameters; use super::{ builders::sample::{ QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, @@ -235,11 +237,7 @@ impl Query { } #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { - use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - - Ok(self - .parameters() - .contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM)) + Ok(self.parameters().reply_key_expr_any()) } 
} diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 6e2e0e7890..ef63719e01 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -68,9 +68,6 @@ pub struct Selector<'a> { pub parameters: Cow<'a, Parameters<'a>>, } -#[zenoh_macros::unstable] -pub const TIME_RANGE_KEY: &str = "_time"; - impl<'a> Selector<'a> { /// Builds a new selector which owns keyexpr and parameters pub fn owned(key_expr: K, parameters: P) -> Self @@ -120,32 +117,49 @@ impl<'a> From<&'a Selector<'a>> for (&'a KeyExpr<'a>, &'a Parameters<'a>) { #[zenoh_macros::unstable] pub trait PredefinedParameters { + const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; const TIME_RANGE_KEY: &'static str = "_time"; /// Sets the time range targeted by the selector parameters. fn set_time_range>>(&mut self, time_range: T); + /// Sets parameter allowing to querier to reply to this request even + /// it the requested key expression does not match the reply key expression. + /// TODO: add example + fn set_reply_key_expr_any(&mut self); /// Extracts the standardized `_time` argument from the selector parameters. - fn time_range(&self) -> ZResult>; + /// Returns `None` if the `_time` argument is not present or `Some` with the result of parsing the `_time` argument + /// if it is present. + fn time_range(&self) -> Option>; + /// Returns true if `_anyke` parameter is present in the selector parameters + fn reply_key_expr_any(&self) -> bool; } -#[zenoh_macros::unstable] impl PredefinedParameters for Parameters<'_> { /// Sets the time range targeted by the selector parameters. 
fn set_time_range>>(&mut self, time_range: T) { let mut time_range: Option = time_range.into(); match time_range.take() { - Some(tr) => self.insert(TIME_RANGE_KEY, format!("{}", tr)), - None => self.remove(TIME_RANGE_KEY), + Some(tr) => self.insert(Self::TIME_RANGE_KEY, format!("{}", tr)), + None => self.remove(Self::TIME_RANGE_KEY), }; } + /// Sets parameter allowing to querier to reply to this request even + /// it the requested key expression does not match the reply key expression. + fn set_reply_key_expr_any(&mut self) { + self.insert(Self::REPLY_KEY_EXPR_ANY_SEL_PARAM, ""); + } + /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. - fn time_range(&self) -> ZResult> { - match self.get(TIME_RANGE_KEY) { - Some(tr) => Ok(Some(tr.parse()?)), - None => Ok(None), - } + fn time_range(&self) -> Option> { + self.get(Self::TIME_RANGE_KEY) + .map(|tr| tr.parse().map_err(Into::into)) + } + + /// Returns true if `_anyke` parameter is present in the selector parameters + fn reply_key_expr_any(&self) -> bool { + self.contains_key(Self::REPLY_KEY_EXPR_ANY_SEL_PARAM) } } @@ -270,7 +284,6 @@ impl<'a> From> for Selector<'a> { #[test] fn selector_accessors() { - use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; use std::collections::HashMap; for s in [ @@ -294,6 +307,9 @@ fn selector_accessors() { assert_eq!(parameters.get("_timetrick").unwrap(), ""); + const TIME_RANGE_KEY: &str = Parameters::TIME_RANGE_KEY; + const ANYKE: &str = Parameters::REPLY_KEY_EXPR_ANY_SEL_PARAM; + let time_range = "[now(-2s)..now(2s)]"; zcondfeat!( "unstable", diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 663ddb866a..4eec95bb87 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -87,14 +87,15 @@ use super::{ liveliness::{Liveliness, LivelinessTokenState}, 
publisher::Publisher, publisher::{MatchingListenerState, MatchingStatus}, - query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, - selector::TIME_RANGE_KEY, }; -use crate::net::{ - primitives::Primitives, - routing::dispatcher::face::Face, - runtime::{Runtime, RuntimeBuilder}, +use crate::{ + api::selector::PredefinedParameters, + net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, + }, }; zconfigurable! { @@ -1672,7 +1673,7 @@ impl Session { let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { ConsolidationMode::Auto => { - if parameters.contains_key(TIME_RANGE_KEY) { + if parameters.time_range().is_none() { ConsolidationMode::None } else { ConsolidationMode::Latest @@ -2223,11 +2224,8 @@ impl Primitives for Session { }; match state.queries.get_mut(&msg.rid) { Some(query) => { - let c = zcondfeat!( - "unstable", - !query.parameters.contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM), - true - ); + let c = + zcondfeat!("unstable", !query.parameters.reply_key_expr_any(), true); if c && !query.key_expr.intersects(&key_expr) { tracing::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3bf3e9f9f7..6b1c300f72 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -254,8 +254,6 @@ pub mod selector { pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; pub use crate::api::selector::Selector; - #[zenoh_macros::unstable] - pub use crate::api::selector::TIME_RANGE_KEY; } /// Subscribing primitives @@ -301,8 +299,6 @@ pub mod querier { pub mod query { #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; - #[zenoh_macros::unstable] - pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; #[zenoh_macros::internal] pub use crate::api::queryable::ReplySample; pub use crate::api::{ diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 605b0638ab..b08b58a8ed 100644 
--- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -28,6 +28,7 @@ mod _prelude { #[zenoh_macros::unstable] pub use crate::api::publisher::PublisherDeclarations; pub use crate::{ + api::selector::PredefinedParameters, api::{ builders::sample::{ QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, From bca5c4dc0329a00a1759adbb1806b7e90e102cab Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 11 Jun 2024 15:10:09 +0200 Subject: [PATCH 410/598] fix: make publisher lock safer (#1116) --- zenoh/src/api/publisher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 2a0a47047b..ec8a8aff35 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -360,7 +360,8 @@ impl<'a> Publisher<'a> { } fn undeclare_matching_listeners(&self) -> ZResult<()> { - for id in zlock!(self.matching_listeners).drain() { + let ids: Vec = zlock!(self.matching_listeners).drain().collect(); + for id in ids { self.session.undeclare_matches_listener_inner(id)? 
} Ok(()) From 63ca9cb69ba6a4b43e117294b7ffa56dc53077b2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 11 Jun 2024 17:21:45 +0200 Subject: [PATCH 411/598] parameters temporary renamed back, clippy fixes --- commons/zenoh-protocol/src/core/mod.rs | 5 +++-- .../zenoh-protocol/src/core/{parameters.rs => properties.rs} | 0 zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/session.rs | 1 - zenoh/src/net/runtime/adminspace.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) rename commons/zenoh-protocol/src/core/{parameters.rs => properties.rs} (100%) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 47f240e8ef..e6c2d0eb7f 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -56,8 +56,9 @@ pub use resolution::*; pub mod parameters_view; pub use parameters_view::*; -pub mod parameters; -pub use parameters::*; +pub mod properties; +pub use properties as parameters; +pub use properties::*; /// The global unique id of a zenoh peer. 
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/properties.rs similarity index 100% rename from commons/zenoh-protocol/src/core/parameters.rs rename to commons/zenoh-protocol/src/core/properties.rs diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 1737540a42..00498b7ac3 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -756,7 +756,7 @@ where let (callback, receiver) = self.handler.into_handler(); self.session .query( - &self.key_expr?.into(), + &self.key_expr?, &Parameters::default(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), QueryTarget::DEFAULT, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 21311a572e..20daca49d8 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -88,7 +88,6 @@ use super::{ publisher::Publisher, publisher::{MatchingListenerState, MatchingStatus}, sample::SourceInfo, - selector::TIME_RANGE_KEY, }; use crate::{ api::selector::PredefinedParameters, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 90325ed6ce..a808443593 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -819,7 +819,7 @@ fn plugins_status(context: &AdminContext, query: Query) { return; } match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - plugin.instance().adminspace_getter(&key_expr, plugin_key) + plugin.instance().adminspace_getter(key_expr, plugin_key) })) { Ok(Ok(responses)) => { for response in responses { From e77401e8691610fbe8964d63472772eedfe09fbc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 11 Jun 2024 17:50:04 +0200 Subject: [PATCH 412/598] simplified code --- zenoh/src/api/query.rs | 37 ++++++++++++++----------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 
0cc6b1e388..0c798185ae 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -415,30 +415,21 @@ impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { /// expressions that don't intersect with the query's. #[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { - Self { - selector: self.selector.map( - |Selector { - key_expr, - parameters, - }| { - if accept == ReplyKeyExpr::Any { - let mut parameters = parameters.into_owned(); - parameters.set_reply_key_expr_any(); - let parameters = Cow::Owned(parameters); - Selector { - key_expr, - parameters, - } - } else { - Selector { - key_expr, - parameters, - } - } - }, - ), - ..self + if accept == ReplyKeyExpr::Any { + if let Ok(Selector { + key_expr, + mut parameters, + }) = self.selector + { + parameters.to_mut().set_reply_key_expr_any(); + let selector = Ok(Selector { + key_expr, + parameters, + }); + return Self { selector, ..self }; + } } + self } } From 4b4c6cb1a119416f0cddc67195f929d014c7ed77 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 11 Jun 2024 18:05:46 +0200 Subject: [PATCH 413/598] unstable predefined parameters unfinihsed --- zenoh/src/api/selector.rs | 10 ++++++++++ zenoh/src/prelude.rs | 3 ++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index ef63719e01..9ff5e97a9e 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -133,6 +133,16 @@ pub trait PredefinedParameters { fn reply_key_expr_any(&self) -> bool; } +#[cfg(not(feature = "unstable"))] +pub(crate) trait PredefinedParameters { + const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; + const TIME_RANGE_KEY: &'static str = "_time"; + fn set_time_range>>(&mut self, time_range: T); + fn set_reply_key_expr_any(&mut self); + fn time_range(&self) -> Option>; + fn reply_key_expr_any(&self) -> bool; +} + impl PredefinedParameters for Parameters<'_> { /// Sets the time range targeted by the selector 
parameters. fn set_time_range>>(&mut self, time_range: T) { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index b08b58a8ed..0119397c75 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -28,7 +28,6 @@ mod _prelude { #[zenoh_macros::unstable] pub use crate::api::publisher::PublisherDeclarations; pub use crate::{ - api::selector::PredefinedParameters, api::{ builders::sample::{ QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, @@ -38,6 +37,8 @@ mod _prelude { config::ValidatedMap, core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, }; + #[zenoh_macros::unstable] + pub use crate::api::selector::PredefinedParameters; } pub use _prelude::*; From 05e6716c4949153066a9df0cdb0594f970bb35fb Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 11 Jun 2024 20:18:52 +0200 Subject: [PATCH 414/598] fix: make `ZBytesIterator` yield deserialization result (#1120) * fix: make `ZBytesIterator` yield deserialization result * fix: fix tests --- zenoh/src/api/bytes.rs | 42 ++++++++++++++++++++------------------ zenoh/tests/attachments.rs | 38 +++++++++++++++++++++++----------- 2 files changed, 48 insertions(+), 32 deletions(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 55159e5beb..110560f6cd 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -209,10 +209,7 @@ impl std::io::Write for ZBytesWriter<'_> { /// Note that [`ZBytes`] contains a serialized version of `T` and iterating over a [`ZBytes`] performs lazy deserialization. 
#[repr(transparent)] #[derive(Debug)] -pub struct ZBytesIterator<'a, T> -where - ZSerde: Deserialize<'a, T>, -{ +pub struct ZBytesIterator<'a, T> { reader: ZBufReader<'a>, _t: PhantomData, } @@ -222,7 +219,7 @@ where for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, for<'a> >::Error: Debug, { - type Item = T; + type Item = ZResult; fn next(&mut self) -> Option { let codec = Zenoh080::new(); @@ -230,8 +227,10 @@ where let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; let kpld = ZBytes::new(kbuf); - let t = ZSerde.deserialize(&kpld).ok()?; - Some(t) + let result = ZSerde + .deserialize(&kpld) + .map_err(|err| zerror!("{err:?}").into()); + Some(result) } } @@ -1993,7 +1992,7 @@ mod tests { let p = ZBytes::from_iter(v.iter()); println!("Deserialize:\t{:?}\n", p); for (i, t) in p.iter::().enumerate() { - assert_eq!(i, t); + assert_eq!(i, t.unwrap()); } let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; @@ -2001,10 +2000,10 @@ mod tests { let p = ZBytes::from_iter(v.drain(..)); println!("Deserialize:\t{:?}\n", p); let mut iter = p.iter::<[u8; 4]>(); - assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); - assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); - assert_eq!(iter.next().unwrap(), [8, 9, 10, 11]); - assert_eq!(iter.next().unwrap(), [12, 13, 14, 15]); + assert_eq!(iter.next().unwrap().unwrap(), [0, 1, 2, 3]); + assert_eq!(iter.next().unwrap().unwrap(), [4, 5, 6, 7]); + assert_eq!(iter.next().unwrap().unwrap(), [8, 9, 10, 11]); + assert_eq!(iter.next().unwrap().unwrap(), [12, 13, 14, 15]); assert!(iter.next().is_none()); use std::collections::HashMap; @@ -2014,7 +2013,7 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, usize)>()); + let o = HashMap::from_iter(p.iter::<(usize, usize)>().map(Result::unwrap)); assert_eq!(hm, o); let mut hm: HashMap> = HashMap::new(); @@ -2023,7 +2022,7 @@ mod tests { 
println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>().map(Result::unwrap)); assert_eq!(hm, o); let mut hm: HashMap> = HashMap::new(); @@ -2032,7 +2031,7 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>().map(Result::unwrap)); assert_eq!(hm, o); let mut hm: HashMap = HashMap::new(); @@ -2041,7 +2040,7 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); + let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>().map(Result::unwrap)); assert_eq!(hm, o); let mut hm: HashMap = HashMap::new(); @@ -2050,7 +2049,7 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); + let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>().map(Result::unwrap)); assert_eq!(hm, o); let mut hm: HashMap> = HashMap::new(); @@ -2059,7 +2058,7 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>().map(Result::unwrap)); assert_eq!(hm, o); let mut hm: HashMap = HashMap::new(); @@ -2068,7 +2067,7 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.iter()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(String, String)>()); + let o = HashMap::from_iter(p.iter::<(String, String)>().map(Result::unwrap)); 
assert_eq!(hm, o); let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); @@ -2077,7 +2076,10 @@ mod tests { println!("Serialize:\t{:?}", hm); let p = ZBytes::from_iter(hm.iter()); println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); + let o = HashMap::from_iter( + p.iter::<(Cow<'static, str>, Cow<'static, str>)>() + .map(Result::unwrap), + ); assert_eq!(hm, o); } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index b52fd067ba..057045ba60 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -21,10 +21,15 @@ fn attachment_pubsub() { .declare_subscriber("test/attachment") .callback(|sample| { println!("{}", sample.payload().deserialize::().unwrap()); - for (k, v) in sample.attachment().unwrap().iter::<( - [u8; std::mem::size_of::()], - [u8; std::mem::size_of::()], - )>() { + for (k, v) in sample + .attachment() + .unwrap() + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) @@ -69,10 +74,13 @@ fn attachment_queries() { let attachment = query.attachment().unwrap(); println!("Query attachment: {:?}", attachment); - for (k, v) in attachment.iter::<( - [u8; std::mem::size_of::()], - [u8; std::mem::size_of::()], - )>() { + for (k, v) in attachment + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)); } @@ -87,6 +95,7 @@ fn attachment_queries() { [u8; std::mem::size_of::()], [u8; std::mem::size_of::()], )>() + .map(Result::unwrap) .map(|(k, _)| (k, k)), )) .wait() @@ -111,10 +120,15 @@ fn attachment_queries() { .unwrap(); while let Ok(reply) = get.recv() { let response = reply.result().unwrap(); - for (k, v) in response.attachment().unwrap().iter::<( - [u8; std::mem::size_of::()], - [u8; std::mem::size_of::()], - )>() { + 
for (k, v) in response + .attachment() + .unwrap() + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + { assert_eq!(k, v) } } From 90df380ed2e612af557c26cf401e4da3a5331b7b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 12 Jun 2024 12:22:04 +0200 Subject: [PATCH 415/598] Remove set_priority and set_congestion_control from publisher --- zenoh/src/api/publisher.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index ec8a8aff35..92ab04145c 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -174,24 +174,12 @@ impl<'a> Publisher<'a> { self.congestion_control } - /// Change the `congestion_control` to apply when routing the data. - #[inline] - pub fn set_congestion_control(&mut self, congestion_control: CongestionControl) { - self.congestion_control = congestion_control; - } - /// Get the priority of the written data. #[inline] pub fn priority(&self) -> Priority { self.priority } - /// Change the priority of the written data. - #[inline] - pub fn set_priority(&mut self, priority: Priority) { - self.priority = priority; - } - /// Consumes the given `Publisher`, returning a thread-safe reference-counting /// pointer to it (`Arc`). This is equivalent to `Arc::new(Publisher)`. 
/// From 3393cdc1591962316af4468c051d494bb236719e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 14:57:21 +0200 Subject: [PATCH 416/598] cargo fmt --- zenoh/src/api/query.rs | 4 ++-- zenoh/src/lib.rs | 4 ++-- zenoh/src/prelude.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 0c798185ae..2395a5120c 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -12,14 +12,14 @@ // ZettaScale Zenoh Team, // +#[zenoh_macros::unstable] +use std::borrow::Cow; use std::{ collections::HashMap, future::{IntoFuture, Ready}, time::Duration, }; -#[zenoh_macros::unstable] -use std::borrow::Cow; use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_protocol::core::{CongestionControl, Parameters, ZenohId}; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8490679827..da3409056d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -247,12 +247,12 @@ pub mod bytes { /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - #[zenoh_macros::unstable] - pub use crate::api::selector::PredefinedParameters; pub use zenoh_protocol::core::Parameters; #[zenoh_macros::unstable] pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + #[zenoh_macros::unstable] + pub use crate::api::selector::PredefinedParameters; pub use crate::api::selector::Selector; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 0119397c75..bc9eb6951c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -27,6 +27,8 @@ mod _prelude { #[zenoh_macros::unstable] pub use crate::api::publisher::PublisherDeclarations; + #[zenoh_macros::unstable] + pub use crate::api::selector::PredefinedParameters; pub use crate::{ api::{ builders::sample::{ @@ -37,8 +39,6 @@ mod _prelude { config::ValidatedMap, core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, }; - #[zenoh_macros::unstable] 
- pub use crate::api::selector::PredefinedParameters; } pub use _prelude::*; From 6a5bd62f18ba625194514cda1adc8b4ed1dccca6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 15:01:39 +0200 Subject: [PATCH 417/598] clippy fix --- zenoh/src/api/query.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 2395a5120c..0e5195c0ee 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // -#[zenoh_macros::unstable] -use std::borrow::Cow; use std::{ collections::HashMap, future::{IntoFuture, Ready}, From ff560e7851fc1e2e8032042f36a77ecd1f5d6b52 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 15:51:08 +0200 Subject: [PATCH 418/598] module reorg --- commons/zenoh-protocol/src/core/endpoint.rs | 46 ++-- commons/zenoh-protocol/src/core/mod.rs | 8 +- .../core/{properties.rs => parameters/mod.rs} | 25 ++- .../src/core/parameters/parameters.rs | 209 +++++++++++++++++ .../src/core/parameters_view.rs | 211 ------------------ io/zenoh-links/zenoh-link-quic/src/utils.rs | 4 +- io/zenoh-links/zenoh-link-tls/src/utils.rs | 4 +- .../zenoh-link-unixpipe/src/unix/mod.rs | 4 +- io/zenoh-transport/src/multicast/manager.rs | 4 +- io/zenoh-transport/src/unicast/manager.rs | 6 +- 10 files changed, 259 insertions(+), 262 deletions(-) rename commons/zenoh-protocol/src/core/{properties.rs => parameters/mod.rs} (94%) create mode 100644 commons/zenoh-protocol/src/core/parameters/parameters.rs delete mode 100644 commons/zenoh-protocol/src/core/parameters_view.rs diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 3c6f3dad1b..96b9b40665 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -16,7 +16,7 @@ use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; -use 
super::{locator::*, parameters_view::ParametersView}; +use super::{locator::*, parameters}; // Parsing chars pub const PROTO_SEPARATOR: char = '/'; @@ -196,15 +196,15 @@ impl<'a> Metadata<'a> { } pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { - ParametersView::iter(self.0) + parameters::iter(self.0) } pub fn get(&'a self, k: &str) -> Option<&'a str> { - ParametersView::get(self.0, k) + parameters::get(self.0, k) } pub fn values(&'a self, k: &str) -> impl DoubleEndedIterator { - ParametersView::values(self.0, k) + parameters::values(self.0, k) } } @@ -250,7 +250,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - ParametersView::from_iter(ParametersView::sort(ParametersView::join( + parameters::from_iter(parameters::sort(parameters::join( self.0.metadata().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ))), @@ -269,7 +269,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - ParametersView::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0, + parameters::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0, self.0.config(), )?; @@ -284,7 +284,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - ParametersView::remove(self.0.metadata().as_str(), k.borrow()).0, + parameters::remove(self.0.metadata().as_str(), k.borrow()).0, self.0.config(), )?; @@ -326,15 +326,15 @@ impl<'a> Config<'a> { } pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { - ParametersView::iter(self.0) + parameters::iter(self.0) } pub fn get(&'a self, k: &str) -> Option<&'a str> { - ParametersView::get(self.0, k) + parameters::get(self.0, k) } pub fn values(&'a self, k: &str) -> impl DoubleEndedIterator { - ParametersView::values(self.0, k) + parameters::values(self.0, k) } } @@ -381,7 +381,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - 
ParametersView::from_iter(ParametersView::sort(ParametersView::join( + parameters::from_iter(parameters::sort(parameters::join( self.0.config().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ))), @@ -400,7 +400,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - ParametersView::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0, + parameters::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0, )?; self.0.inner = ep.inner; @@ -415,7 +415,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - ParametersView::remove(self.0.config().as_str(), k.borrow()).0, + parameters::remove(self.0.config().as_str(), k.borrow()).0, )?; self.0.inner = ep.inner; @@ -577,8 +577,8 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - ParametersView::from_iter_into( - ParametersView::sort(ParametersView::iter(&s[midx + 1..])), + parameters::from_iter_into( + parameters::sort(parameters::iter(&s[midx + 1..])), &mut inner, ); Ok(EndPoint { inner }) @@ -587,8 +587,8 @@ impl TryFrom for EndPoint { (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - ParametersView::from_iter_into( - ParametersView::sort(ParametersView::iter(&s[cidx + 1..])), + parameters::from_iter_into( + parameters::sort(parameters::iter(&s[cidx + 1..])), &mut inner, ); Ok(EndPoint { inner }) @@ -603,14 +603,14 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - ParametersView::from_iter_into( - ParametersView::sort(ParametersView::iter(&s[midx + 1..cidx])), + parameters::from_iter_into( + parameters::sort(parameters::iter(&s[midx + 1..cidx])), &mut inner, ); 
inner.push(CONFIG_SEPARATOR); - ParametersView::from_iter_into( - ParametersView::sort(ParametersView::iter(&s[cidx + 1..])), + parameters::from_iter_into( + parameters::sort(parameters::iter(&s[cidx + 1..])), &mut inner, ); @@ -655,11 +655,11 @@ impl EndPoint { if rng.gen_bool(0.5) { endpoint.push(METADATA_SEPARATOR); - ParametersView::rand(&mut endpoint); + parameters::rand(&mut endpoint); } if rng.gen_bool(0.5) { endpoint.push(CONFIG_SEPARATOR); - ParametersView::rand(&mut endpoint); + parameters::rand(&mut endpoint); } endpoint.parse().unwrap() diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index e6c2d0eb7f..2daa7a3b49 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -53,12 +53,8 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; -pub mod parameters_view; -pub use parameters_view::*; - -pub mod properties; -pub use properties as parameters; -pub use properties::*; +pub mod parameters; +pub use parameters::*; /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/parameters/mod.rs similarity index 94% rename from commons/zenoh-protocol/src/core/properties.rs rename to commons/zenoh-protocol/src/core/parameters/mod.rs index b5cfc92e05..e18b5e0157 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/parameters/mod.rs @@ -19,7 +19,10 @@ use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; -use super::parameters_view::{ParametersView, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +// This module contains utility functions for parsing and manipulating &str as set of key=value pairs +#[allow(clippy::module_inception)] +mod parameters; +pub use parameters::*; /// A map of key/value (String,String) properties. 
/// It can be parsed from a String, using `;` or `` as separator between each properties @@ -71,7 +74,7 @@ impl<'s> Parameters<'s> { where K: Borrow, { - ParametersView::get(self.as_str(), k.borrow()).is_some() + parameters::get(self.as_str(), k.borrow()).is_some() } /// Returns a reference to the `&str`-value corresponding to the key. @@ -79,7 +82,7 @@ impl<'s> Parameters<'s> { where K: Borrow, { - ParametersView::get(self.as_str(), k.borrow()) + parameters::get(self.as_str(), k.borrow()) } /// Returns an iterator to the `&str`-values corresponding to the key. @@ -87,12 +90,12 @@ impl<'s> Parameters<'s> { where K: Borrow, { - ParametersView::values(self.as_str(), k.borrow()) + parameters::values(self.as_str(), k.borrow()) } /// Returns an iterator on the key-value pairs as `(&str, &str)`. pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - ParametersView::iter(self.as_str()) + parameters::iter(self.as_str()) } /// Inserts a key-value pair into the map. @@ -103,7 +106,7 @@ impl<'s> Parameters<'s> { K: Borrow, V: Borrow, { - let (inner, item) = ParametersView::insert(self.as_str(), k.borrow(), v.borrow()); + let (inner, item) = parameters::insert(self.as_str(), k.borrow(), v.borrow()); let item = item.map(|i| i.to_string()); self.0 = Cow::Owned(inner); item @@ -114,7 +117,7 @@ impl<'s> Parameters<'s> { where K: Borrow, { - let (inner, item) = ParametersView::remove(self.as_str(), k.borrow()); + let (inner, item) = parameters::remove(self.as_str(), k.borrow()); let item = item.map(|i| i.to_string()); self.0 = Cow::Owned(inner); item @@ -132,7 +135,7 @@ impl<'s> Parameters<'s> { K: Borrow + 'e + ?Sized, V: Borrow + 'e + ?Sized, { - let inner = ParametersView::from_iter(ParametersView::join( + let inner = parameters::from_iter(parameters::join( self.iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), )); @@ -146,7 +149,7 @@ impl<'s> Parameters<'s> { /// Returns `true`` if all keys are sorted in alphabetical order. 
pub fn is_ordered(&self) -> bool { - ParametersView::is_ordered(self.as_str()) + parameters::is_ordered(self.as_str()) } } @@ -197,7 +200,7 @@ where { fn from_iter>(iter: T) -> Self { let iter = iter.into_iter(); - let inner = ParametersView::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + let inner = parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } @@ -360,7 +363,7 @@ impl<'s> OrderedProperties<'s> { fn order(&mut self) { if !self.0.is_ordered() { - self.0 = Parameters(Cow::Owned(ParametersView::from_iter(ParametersView::sort( + self.0 = Parameters(Cow::Owned(parameters::from_iter(parameters::sort( self.iter(), )))); } diff --git a/commons/zenoh-protocol/src/core/parameters/parameters.rs b/commons/zenoh-protocol/src/core/parameters/parameters.rs new file mode 100644 index 0000000000..4f1c1f5eb4 --- /dev/null +++ b/commons/zenoh-protocol/src/core/parameters/parameters.rs @@ -0,0 +1,209 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +/// Module provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. +/// +/// `;` is the separator between the key-value `(&str, &str)` elements. +/// +/// `=` is the separator between the `&str`-key and `&str`-value +/// +/// `|` is the separator between multiple elements of the values. 
+ +pub(super) const LIST_SEPARATOR: char = ';'; +pub(super) const FIELD_SEPARATOR: char = '='; +pub(super) const VALUE_SEPARATOR: char = '|'; + +use alloc::{string::String, vec::Vec}; + +fn split_once(s: &str, c: char) -> (&str, &str) { + match s.find(c) { + Some(index) => { + let (l, r) = s.split_at(index); + (l, &r[1..]) + } + None => (s, ""), + } +} + +/// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format. +pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + s.split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) +} + +/// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. +pub fn sort<'s, I>(iter: I) -> impl Iterator +where + I: Iterator, +{ + let mut from = iter.collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + from.into_iter() +} + +/// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. +pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator + Clone +where + C: Iterator + Clone, + N: Iterator + Clone + 's, +{ + let n = new.clone(); + let current = current + .clone() + .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + current.chain(new) +} + +/// Builds a string from an iterator preserving the order. +#[allow(clippy::should_implement_trait)] +pub fn from_iter<'s, I>(iter: I) -> String +where + I: Iterator, +{ + let mut into = String::new(); + from_iter_into(iter, &mut into); + into +} + +/// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. +pub fn from_iter_into<'s, I>(iter: I, into: &mut String) +where + I: Iterator, +{ + concat_into(iter, into); +} + +/// Get the a `&str`-value for a `&str`-key according to the parameters format. 
+pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + iter(s) + .find(|(key, _)| *key == k) + .map(|(_, value)| value) +} + +/// Get the a `&str`-value iterator for a `&str`-key according to the parameters format. +pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + match get(s, k) { + Some(v) => v.split(VALUE_SEPARATOR), + None => { + let mut i = "".split(VALUE_SEPARATOR); + i.next(); + i + } + } +} + +fn _insert<'s, I>( + i: I, + k: &'s str, + v: &'s str, +) -> (impl Iterator, Option<&'s str>) +where + I: Iterator + Clone, +{ + let mut iter = i.clone(); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + + let current = i.filter(move |x| x.0 != k); + let new = Some((k, v)).into_iter(); + (current.chain(new), item) +} + +/// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. +pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let (iter, item) = _insert(iter(s), k, v); + (from_iter(iter), item) +} + +/// Same as [`Self::insert`] but keys are sorted in alphabetical order. +pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let (iter, item) = _insert(iter(s), k, v); + (from_iter(sort(iter)), item) +} + +/// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. 
+pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) { + let mut iter = iter(s); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let iter = iter.filter(|x| x.0 != k); + (concat(iter), item) +} + +/// Returns `true` if all keys are sorted in alphabetical order +pub fn is_ordered(s: &str) -> bool { + let mut prev = None; + for (k, _) in iter(s) { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true +} + +fn concat<'s, I>(iter: I) -> String +where + I: Iterator, +{ + let mut into = String::new(); + concat_into(iter, &mut into); + into +} + +fn concat_into<'s, I>(iter: I, into: &mut String) +where + I: Iterator, +{ + let mut first = true; + for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } +} + +#[cfg(feature = "test")] +pub fn rand(into: &mut String) { + use rand::{ + distributions::{Alphanumeric, DistString}, + Rng, + }; + + const MIN: usize = 2; + const MAX: usize = 8; + + let mut rng = rand::thread_rng(); + + let num = rng.gen_range(MIN..MAX); + for i in 0..num { + if i != 0 { + into.push(LIST_SEPARATOR); + } + let len = rng.gen_range(MIN..MAX); + let key = Alphanumeric.sample_string(&mut rng, len); + into.push_str(key.as_str()); + + into.push(FIELD_SEPARATOR); + + let len = rng.gen_range(MIN..MAX); + let value = Alphanumeric.sample_string(&mut rng, len); + into.push_str(value.as_str()); + } +} diff --git a/commons/zenoh-protocol/src/core/parameters_view.rs b/commons/zenoh-protocol/src/core/parameters_view.rs deleted file mode 100644 index adcf0ea0fb..0000000000 --- a/commons/zenoh-protocol/src/core/parameters_view.rs +++ /dev/null @@ -1,211 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is 
available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -pub(super) const LIST_SEPARATOR: char = ';'; -pub(super) const FIELD_SEPARATOR: char = '='; -pub(super) const VALUE_SEPARATOR: char = '|'; - -use alloc::{string::String, vec::Vec}; - -fn split_once(s: &str, c: char) -> (&str, &str) { - match s.find(c) { - Some(index) => { - let (l, r) = s.split_at(index); - (l, &r[1..]) - } - None => (s, ""), - } -} - -/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -/// -/// `;` is the separator between the key-value `(&str, &str)` elements. -/// -/// `=` is the separator between the `&str`-key and `&str`-value -/// -/// `|` is the separator between multiple elements of the values. -pub struct ParametersView; - -impl ParametersView { - /// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format. - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) - } - - /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. - pub fn sort<'s, I>(iter: I) -> impl Iterator - where - I: Iterator, - { - let mut from = iter.collect::>(); - from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - from.into_iter() - } - - /// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. 
- pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator + Clone - where - C: Iterator + Clone, - N: Iterator + Clone + 's, - { - let n = new.clone(); - let current = current - .clone() - .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - current.chain(new) - } - - /// Builds a string from an iterator preserving the order. - #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - ParametersView::from_iter_into(iter, &mut into); - into - } - - /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) - where - I: Iterator, - { - ParametersView::concat_into(iter, into); - } - - /// Get the a `&str`-value for a `&str`-key according to the parameters format. - pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - ParametersView::iter(s) - .find(|(key, _)| *key == k) - .map(|(_, value)| value) - } - - /// Get the a `&str`-value iterator for a `&str`-key according to the parameters format. - pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { - match ParametersView::get(s, k) { - Some(v) => v.split(VALUE_SEPARATOR), - None => { - let mut i = "".split(VALUE_SEPARATOR); - i.next(); - i - } - } - } - - fn _insert<'s, I>( - i: I, - k: &'s str, - v: &'s str, - ) -> (impl Iterator, Option<&'s str>) - where - I: Iterator + Clone, - { - let mut iter = i.clone(); - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - - let current = i.filter(move |x| x.0 != k); - let new = Some((k, v)).into_iter(); - (current.chain(new), item) - } - - /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. 
- pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { - let (iter, item) = ParametersView::_insert(ParametersView::iter(s), k, v); - (ParametersView::from_iter(iter), item) - } - - /// Same as [`Self::insert`] but keys are sorted in alphabetical order. - pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { - let (iter, item) = ParametersView::_insert(ParametersView::iter(s), k, v); - (ParametersView::from_iter(ParametersView::sort(iter)), item) - } - - /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. - pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) { - let mut iter = ParametersView::iter(s); - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let iter = iter.filter(|x| x.0 != k); - (ParametersView::concat(iter), item) - } - - /// Returns `true` if all keys are sorted in alphabetical order - pub fn is_ordered(s: &str) -> bool { - let mut prev = None; - for (k, _) in ParametersView::iter(s) { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } - - fn concat<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - ParametersView::concat_into(iter, &mut into); - into - } - - fn concat_into<'s, I>(iter: I, into: &mut String) - where - I: Iterator, - { - let mut first = true; - for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - } - - #[cfg(feature = "test")] - pub fn rand(into: &mut String) { - use rand::{ - distributions::{Alphanumeric, DistString}, - Rng, - }; - - const MIN: usize = 2; - const MAX: usize = 8; - - let mut rng = rand::thread_rng(); - - let num = rng.gen_range(MIN..MAX); - for i in 0..num { - if i != 0 { - into.push(LIST_SEPARATOR); - } - let len = rng.gen_range(MIN..MAX); - let 
key = Alphanumeric.sample_string(&mut rng, len); - into.push_str(key.as_str()); - - into.push(FIELD_SEPARATOR); - - let len = rng.gen_range(MIN..MAX); - let value = Alphanumeric.sample_string(&mut rng, len); - into.push_str(value.as_str()); - } - } -} diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs index 059734f9c9..f8c151cdd7 100644 --- a/io/zenoh-links/zenoh-link-quic/src/utils.rs +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -31,7 +31,7 @@ use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::ConfigurationInspector; use zenoh_protocol::core::{ endpoint::{Address, Config}, - ParametersView, + parameters, }; use zenoh_result::{bail, zerror, ZError, ZResult}; @@ -140,7 +140,7 @@ impl ConfigurationInspector for TlsConfigurator { }; } - Ok(ParametersView::from_iter(ps.drain(..))) + Ok(parameters::from_iter(ps.drain(..))) } } diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index 1acaa05454..421c5817f2 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -33,7 +33,7 @@ use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; use zenoh_protocol::core::{ endpoint::{Address, Config}, - ParametersView, + parameters, }; use zenoh_result::{bail, zerror, ZError, ZResult}; @@ -142,7 +142,7 @@ impl ConfigurationInspector for TlsConfigurator { }; } - Ok(ParametersView::from_iter(ps.drain(..))) + Ok(parameters::from_iter(ps.drain(..))) } } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index 6d11878409..9266534f2b 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -24,7 +24,7 @@ pub use unicast::*; use zenoh_config::Config; use zenoh_core::zconfigurable; use 
zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{Locator, ParametersView}; +use zenoh_protocol::core::{Locator, parameters}; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; @@ -56,7 +56,7 @@ impl ConfigurationInspector for UnixPipeConfigurator { properties.push((config::FILE_ACCESS_MASK, &file_access_mask_)); } - let s = ParametersView::from_iter(properties.drain(..)); + let s = parameters::from_iter(properties.drain(..)); Ok(s) } diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index cab59dfb32..2552c6ec8b 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -22,7 +22,7 @@ use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; use zenoh_protocol::{ - core::{ParametersView, ZenohId}, + core::{parameters, ZenohId}, transport::close, }; use zenoh_result::{bail, zerror, ZResult}; @@ -258,7 +258,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend_from_iter(ParametersView::iter(config))?; + .extend_from_iter(parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index a3bc1a56a8..933e415367 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -30,7 +30,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::{ParametersView, ZenohId}, + core::{parameters, ZenohId}, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; @@ -387,7 +387,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend_from_iter(ParametersView::iter(config))?; + 
.extend_from_iter(parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -698,7 +698,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend_from_iter(ParametersView::iter(config))?; + .extend_from_iter(parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager From 10158d67e5939121ea806bed77244e8b00673cfa Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 16:02:47 +0200 Subject: [PATCH 419/598] more renamings --- .../zenoh-protocol/src/core/parameters/mod.rs | 518 +---------------- .../src/core/parameters/properties.rs | 528 ++++++++++++++++++ 2 files changed, 532 insertions(+), 514 deletions(-) create mode 100644 commons/zenoh-protocol/src/core/parameters/properties.rs diff --git a/commons/zenoh-protocol/src/core/parameters/mod.rs b/commons/zenoh-protocol/src/core/parameters/mod.rs index e18b5e0157..eda078f39d 100644 --- a/commons/zenoh-protocol/src/core/parameters/mod.rs +++ b/commons/zenoh-protocol/src/core/parameters/mod.rs @@ -11,522 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use alloc::{ - borrow::Cow, - string::{String, ToString}, -}; -use core::{borrow::Borrow, fmt}; -#[cfg(feature = "std")] -use std::collections::HashMap; -// This module contains utility functions for parsing and manipulating &str as set of key=value pairs +// This module contains utility functions for parsing and manipulating &str as key-value pairs #[allow(clippy::module_inception)] mod parameters; pub use parameters::*; -/// A map of key/value (String,String) properties. -/// It can be parsed from a String, using `;` or `` as separator between each properties -/// and `=` as separator between a key and its value. Keys and values are trimed. 
-/// -/// Example: -/// ``` -/// use zenoh_protocol::core::Properties; -/// -/// let a = "a=1;b=2;c=3|4|5;d=6"; -/// let p = Properties::from(a); -/// -/// // Retrieve values -/// assert!(!p.is_empty()); -/// assert_eq!(p.get("a").unwrap(), "1"); -/// assert_eq!(p.get("b").unwrap(), "2"); -/// assert_eq!(p.get("c").unwrap(), "3|4|5"); -/// assert_eq!(p.get("d").unwrap(), "6"); -/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); -/// -/// // Iterate over properties -/// let mut iter = p.iter(); -/// assert_eq!(iter.next().unwrap(), ("a", "1")); -/// assert_eq!(iter.next().unwrap(), ("b", "2")); -/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); -/// assert_eq!(iter.next().unwrap(), ("d", "6")); -/// assert!(iter.next().is_none()); -/// -/// // Create properties from iterators -/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); -/// assert_eq!(p, pi); -/// ``` -#[derive(Clone, PartialEq, Eq, Hash, Default)] -pub struct Parameters<'s>(Cow<'s, str>); - -impl<'s> Parameters<'s> { - /// Returns `true` if properties does not contain anything. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns properties as [`str`]. - pub fn as_str(&'s self) -> &'s str { - &self.0 - } - - /// Returns `true` if properties contains the specified key. - pub fn contains_key(&self, k: K) -> bool - where - K: Borrow, - { - parameters::get(self.as_str(), k.borrow()).is_some() - } - - /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&'s self, k: K) -> Option<&'s str> - where - K: Borrow, - { - parameters::get(self.as_str(), k.borrow()) - } - - /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&'s self, k: K) -> impl DoubleEndedIterator - where - K: Borrow, - { - parameters::values(self.as_str(), k.borrow()) - } - - /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
- pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - parameters::iter(self.as_str()) - } - - /// Inserts a key-value pair into the map. - /// If the map did not have this key present, [`None`]` is returned. - /// If the map did have this key present, the value is updated, and the old value is returned. - pub fn insert(&mut self, k: K, v: V) -> Option - where - K: Borrow, - V: Borrow, - { - let (inner, item) = parameters::insert(self.as_str(), k.borrow(), v.borrow()); - let item = item.map(|i| i.to_string()); - self.0 = Cow::Owned(inner); - item - } - - /// Removes a key from the map, returning the value at the key if the key was previously in the properties. - pub fn remove(&mut self, k: K) -> Option - where - K: Borrow, - { - let (inner, item) = parameters::remove(self.as_str(), k.borrow()); - let item = item.map(|i| i.to_string()); - self.0 = Cow::Owned(inner); - item - } - - /// Extend these properties with other properties. - pub fn extend(&mut self, other: &Parameters) { - self.extend_from_iter(other.iter()); - } - - /// Extend these properties from an iterator. - pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) - where - I: Iterator + Clone, - K: Borrow + 'e + ?Sized, - V: Borrow + 'e + ?Sized, - { - let inner = parameters::from_iter(parameters::join( - self.iter(), - iter.map(|(k, v)| (k.borrow(), v.borrow())), - )); - self.0 = Cow::Owned(inner); - } - - /// Convert these properties into owned properties. - pub fn into_owned(self) -> Parameters<'static> { - Parameters(Cow::Owned(self.0.into_owned())) - } - - /// Returns `true`` if all keys are sorted in alphabetical order. 
- pub fn is_ordered(&self) -> bool { - parameters::is_ordered(self.as_str()) - } -} - -impl<'s> From<&'s str> for Parameters<'s> { - fn from(mut value: &'s str) -> Self { - value = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - Self(Cow::Borrowed(value)) - } -} - -impl From for Parameters<'_> { - fn from(mut value: String) -> Self { - let s = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - value.truncate(s.len()); - Self(Cow::Owned(value)) - } -} - -impl<'s> From> for Parameters<'s> { - fn from(value: Cow<'s, str>) -> Self { - match value { - Cow::Borrowed(s) => Parameters::from(s), - Cow::Owned(s) => Parameters::from(s), - } - } -} - -impl<'a> From> for Cow<'_, Parameters<'a>> { - fn from(props: Parameters<'a>) -> Self { - Cow::Owned(props) - } -} - -impl<'a> From<&'a Parameters<'a>> for Cow<'a, Parameters<'a>> { - fn from(props: &'a Parameters<'a>) -> Self { - Cow::Borrowed(props) - } -} - -impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Parameters<'_> -where - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, -{ - fn from_iter>(iter: T) -> Self { - let iter = iter.into_iter(); - let inner = parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - Self(Cow::Owned(inner)) - } -} - -impl<'s, K, V> FromIterator<&'s (K, V)> for Parameters<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from_iter>(iter: T) -> Self { - Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) - } -} - -impl<'s, K, V> From<&'s [(K, V)]> for Parameters<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from(value: &'s [(K, V)]) -> Self { - Self::from_iter(value.iter()) - } -} - -#[cfg(feature = "std")] -impl From> for Parameters<'_> -where - K: Borrow, - V: Borrow, -{ - fn from(map: HashMap) -> Self { - Self::from_iter(map.iter()) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { 
- fn from(props: &'s Parameters<'s>) -> Self { - HashMap::from_iter(props.iter()) - } -} - -#[cfg(feature = "std")] -impl From<&Parameters<'_>> for HashMap { - fn from(props: &Parameters<'_>) -> Self { - HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s Parameters<'s>> for HashMap, Cow<'s, str>> { - fn from(props: &'s Parameters<'s>) -> Self { - HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) - } -} - -#[cfg(feature = "std")] -impl From> for HashMap { - fn from(props: Parameters) -> Self { - HashMap::from(&props) - } -} - -impl fmt::Display for Parameters<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl fmt::Debug for Parameters<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -#[derive(Clone, PartialEq, Eq, Hash, Default)] -pub struct OrderedProperties<'s>(Parameters<'s>); - -impl<'s> OrderedProperties<'s> { - /// Returns `true` if properties does not contain anything. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns properties as [`str`]. - pub fn as_str(&'s self) -> &'s str { - self.0.as_str() - } - - /// Returns `true` if properties contains the specified key. - pub fn contains_key(&self, k: K) -> bool - where - K: Borrow, - { - self.0.contains_key(k) - } - - /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&'s self, k: K) -> Option<&'s str> - where - K: Borrow, - { - self.0.get(k) - } - - /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&'s self, k: K) -> impl DoubleEndedIterator - where - K: Borrow, - { - self.0.values(k) - } - - /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
- pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - self.0.iter() - } - - /// Removes a key from the map, returning the value at the key if the key was previously in the properties. - pub fn remove(&mut self, k: K) -> Option - where - K: Borrow, - { - self.0.remove(k) - } - - /// Inserts a key-value pair into the map. - /// If the map did not have this key present, [`None`]` is returned. - /// If the map did have this key present, the value is updated, and the old value is returned. - pub fn insert(&mut self, k: K, v: V) -> Option - where - K: Borrow, - V: Borrow, - { - let item = self.0.insert(k, v); - self.order(); - item - } - - /// Extend these properties with other properties. - pub fn extend(&mut self, other: &Parameters) { - self.extend_from_iter(other.iter()); - } - - /// Extend these properties from an iterator. - pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) - where - I: Iterator + Clone, - K: Borrow + 'e + ?Sized, - V: Borrow + 'e + ?Sized, - { - self.0.extend_from_iter(iter); - self.order(); - } - - /// Convert these properties into owned properties. 
- pub fn into_owned(self) -> OrderedProperties<'static> { - OrderedProperties(self.0.into_owned()) - } - - fn order(&mut self) { - if !self.0.is_ordered() { - self.0 = Parameters(Cow::Owned(parameters::from_iter(parameters::sort( - self.iter(), - )))); - } - } -} - -impl<'s> From> for OrderedProperties<'s> { - fn from(value: Parameters<'s>) -> Self { - let mut props = Self(value); - props.order(); - props - } -} - -impl<'s> From<&'s str> for OrderedProperties<'s> { - fn from(value: &'s str) -> Self { - Self::from(Parameters::from(value)) - } -} - -impl From for OrderedProperties<'_> { - fn from(value: String) -> Self { - Self::from(Parameters::from(value)) - } -} - -impl<'s> From> for OrderedProperties<'s> { - fn from(value: Cow<'s, str>) -> Self { - Self::from(Parameters::from(value)) - } -} - -impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> -where - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, -{ - fn from_iter>(iter: T) -> Self { - Self::from(Parameters::from_iter(iter)) - } -} - -impl<'s, K, V> FromIterator<&'s (K, V)> for OrderedProperties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from_iter>(iter: T) -> Self { - Self::from(Parameters::from_iter(iter)) - } -} - -impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from(value: &'s [(K, V)]) -> Self { - Self::from_iter(value.iter()) - } -} - -#[cfg(feature = "std")] -impl From> for OrderedProperties<'_> -where - K: Borrow, - V: Borrow, -{ - fn from(map: HashMap) -> Self { - Self::from_iter(map.iter()) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { - fn from(props: &'s OrderedProperties<'s>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl From<&OrderedProperties<'_>> for HashMap { - fn from(props: &OrderedProperties<'_>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s 
OrderedProperties<'s>> for HashMap, Cow<'s, str>> { - fn from(props: &'s OrderedProperties<'s>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl From> for HashMap { - fn from(props: OrderedProperties) -> Self { - HashMap::from(&props) - } -} - -impl fmt::Display for OrderedProperties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl fmt::Debug for OrderedProperties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_properties() { - assert!(Parameters::from("").0.is_empty()); - - assert_eq!(Parameters::from("p1"), Parameters::from(&[("p1", "")][..])); - - assert_eq!( - Parameters::from("p1=v1"), - Parameters::from(&[("p1", "v1")][..]) - ); - - assert_eq!( - Parameters::from("p1=v1;p2=v2;"), - Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Parameters::from("p1=v1;p2=v2;|="), - Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Parameters::from("p1=v1;p2;p3=v3"), - Parameters::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) - ); - - assert_eq!( - Parameters::from("p1=v 1;p 2=v2"), - Parameters::from(&[("p1", "v 1"), ("p 2", "v2")][..]) - ); - - assert_eq!( - Parameters::from("p1=x=y;p2=a==b"), - Parameters::from(&[("p1", "x=y"), ("p2", "a==b")][..]) - ); - - let mut hm: HashMap = HashMap::new(); - hm.insert("p1".to_string(), "v1".to_string()); - assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); - - let mut hm: HashMap<&str, &str> = HashMap::new(); - hm.insert("p1", "v1"); - assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); - - let mut hm: HashMap, Cow> = HashMap::new(); - hm.insert(Cow::from("p1"), Cow::from("v1")); - assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); - } -} +// This module contains the `Parameters` struct which provides a HashMap-like interface for string with the key-value 
pairs +mod properties; +pub use properties::Parameters; diff --git a/commons/zenoh-protocol/src/core/parameters/properties.rs b/commons/zenoh-protocol/src/core/parameters/properties.rs new file mode 100644 index 0000000000..95a2907b2d --- /dev/null +++ b/commons/zenoh-protocol/src/core/parameters/properties.rs @@ -0,0 +1,528 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use alloc::{ + borrow::Cow, + string::{String, ToString}, +}; +use core::{borrow::Borrow, fmt}; +#[cfg(feature = "std")] +use std::collections::HashMap; +use super::{parameters as parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; + +/// A map of key/value (String,String) properties. +/// It can be parsed from a String, using `;` or `` as separator between each properties +/// and `=` as separator between a key and its value. Keys and values are trimed. 
+/// +/// Example: +/// ``` +/// use zenoh_protocol::core::Properties; +/// +/// let a = "a=1;b=2;c=3|4|5;d=6"; +/// let p = Properties::from(a); +/// +/// // Retrieve values +/// assert!(!p.is_empty()); +/// assert_eq!(p.get("a").unwrap(), "1"); +/// assert_eq!(p.get("b").unwrap(), "2"); +/// assert_eq!(p.get("c").unwrap(), "3|4|5"); +/// assert_eq!(p.get("d").unwrap(), "6"); +/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); +/// +/// // Iterate over properties +/// let mut iter = p.iter(); +/// assert_eq!(iter.next().unwrap(), ("a", "1")); +/// assert_eq!(iter.next().unwrap(), ("b", "2")); +/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); +/// assert_eq!(iter.next().unwrap(), ("d", "6")); +/// assert!(iter.next().is_none()); +/// +/// // Create properties from iterators +/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); +/// assert_eq!(p, pi); +/// ``` +#[derive(Clone, PartialEq, Eq, Hash, Default)] +pub struct Parameters<'s>(Cow<'s, str>); + +impl<'s> Parameters<'s> { + /// Returns `true` if properties does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns properties as [`str`]. + pub fn as_str(&'s self) -> &'s str { + &self.0 + } + + /// Returns `true` if properties contains the specified key. + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + parameters::get(self.as_str(), k.borrow()).is_some() + } + + /// Returns a reference to the `&str`-value corresponding to the key. + pub fn get(&'s self, k: K) -> Option<&'s str> + where + K: Borrow, + { + parameters::get(self.as_str(), k.borrow()) + } + + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + parameters::values(self.as_str(), k.borrow()) + } + + /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
+ pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + parameters::iter(self.as_str()) + } + + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let (inner, item) = parameters::insert(self.as_str(), k.borrow(), v.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); + item + } + + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + let (inner, item) = parameters::remove(self.as_str(), k.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); + item + } + + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Parameters) { + self.extend_from_iter(other.iter()); + } + + /// Extend these properties from an iterator. + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) + where + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, + { + let inner = parameters::from_iter(parameters::join( + self.iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + )); + self.0 = Cow::Owned(inner); + } + + /// Convert these properties into owned properties. + pub fn into_owned(self) -> Parameters<'static> { + Parameters(Cow::Owned(self.0.into_owned())) + } + + /// Returns `true`` if all keys are sorted in alphabetical order. 
+ pub fn is_ordered(&self) -> bool { + parameters::is_ordered(self.as_str()) + } +} + +impl<'s> From<&'s str> for Parameters<'s> { + fn from(mut value: &'s str) -> Self { + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + Self(Cow::Borrowed(value)) + } +} + +impl From for Parameters<'_> { + fn from(mut value: String) -> Self { + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); + Self(Cow::Owned(value)) + } +} + +impl<'s> From> for Parameters<'s> { + fn from(value: Cow<'s, str>) -> Self { + match value { + Cow::Borrowed(s) => Parameters::from(s), + Cow::Owned(s) => Parameters::from(s), + } + } +} + +impl<'a> From> for Cow<'_, Parameters<'a>> { + fn from(props: Parameters<'a>) -> Self { + Cow::Owned(props) + } +} + +impl<'a> From<&'a Parameters<'a>> for Cow<'a, Parameters<'a>> { + fn from(props: &'a Parameters<'a>) -> Self { + Cow::Borrowed(props) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Parameters<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + let iter = iter.into_iter(); + let inner = parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + Self(Cow::Owned(inner)) + } +} + +impl<'s, K, V> FromIterator<&'s (K, V)> for Parameters<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for Parameters<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for Parameters<'_> +where + K: Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { 
+ fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from_iter(props.iter()) + } +} + +#[cfg(feature = "std")] +impl From<&Parameters<'_>> for HashMap { + fn from(props: &Parameters<'_>) -> Self { + HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s Parameters<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: Parameters) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for Parameters<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for Parameters<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +#[derive(Clone, PartialEq, Eq, Hash, Default)] +pub struct OrderedProperties<'s>(Parameters<'s>); + +impl<'s> OrderedProperties<'s> { + /// Returns `true` if properties does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns properties as [`str`]. + pub fn as_str(&'s self) -> &'s str { + self.0.as_str() + } + + /// Returns `true` if properties contains the specified key. + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + self.0.contains_key(k) + } + + /// Returns a reference to the `&str`-value corresponding to the key. + pub fn get(&'s self, k: K) -> Option<&'s str> + where + K: Borrow, + { + self.0.get(k) + } + + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + self.0.values(k) + } + + /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
+ pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + self.0.iter() + } + + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + self.0.remove(k) + } + + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let item = self.0.insert(k, v); + self.order(); + item + } + + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Parameters) { + self.extend_from_iter(other.iter()); + } + + /// Extend these properties from an iterator. + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) + where + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, + { + self.0.extend_from_iter(iter); + self.order(); + } + + /// Convert these properties into owned properties. 
+ pub fn into_owned(self) -> OrderedProperties<'static> { + OrderedProperties(self.0.into_owned()) + } + + fn order(&mut self) { + if !self.0.is_ordered() { + self.0 = Parameters(Cow::Owned(parameters::from_iter(parameters::sort( + self.iter(), + )))); + } + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Parameters<'s>) -> Self { + let mut props = Self(value); + props.order(); + props + } +} + +impl<'s> From<&'s str> for OrderedProperties<'s> { + fn from(value: &'s str) -> Self { + Self::from(Parameters::from(value)) + } +} + +impl From for OrderedProperties<'_> { + fn from(value: String) -> Self { + Self::from(Parameters::from(value)) + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Cow<'s, str>) -> Self { + Self::from(Parameters::from(value)) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Parameters::from_iter(iter)) + } +} + +impl<'s, K, V> FromIterator<&'s (K, V)> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Parameters::from_iter(iter)) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for OrderedProperties<'_> +where + K: Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From<&OrderedProperties<'_>> for HashMap { + fn from(props: &OrderedProperties<'_>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s 
OrderedProperties<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: OrderedProperties) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_properties() { + assert!(Parameters::from("").0.is_empty()); + + assert_eq!(Parameters::from("p1"), Parameters::from(&[("p1", "")][..])); + + assert_eq!( + Parameters::from("p1=v1"), + Parameters::from(&[("p1", "v1")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2=v2;"), + Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2=v2;|="), + Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2;p3=v3"), + Parameters::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) + ); + + assert_eq!( + Parameters::from("p1=v 1;p 2=v2"), + Parameters::from(&[("p1", "v 1"), ("p 2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=x=y;p2=a==b"), + Parameters::from(&[("p1", "x=y"), ("p2", "a==b")][..]) + ); + + let mut hm: HashMap = HashMap::new(); + hm.insert("p1".to_string(), "v1".to_string()); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap<&str, &str> = HashMap::new(); + hm.insert("p1", "v1"); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap, Cow> = HashMap::new(); + hm.insert(Cow::from("p1"), Cow::from("v1")); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + } +} From c8537bc07d89b843b7b3550a356851d5264f6067 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 
16:05:22 +0200 Subject: [PATCH 420/598] rename wrapped zenoh-protocol structures to `...Proto`, wrap and expose `EntityGlobalId` (#1118) * zenohIdInner type added * EntityGlobalIdInner * HelloInner near Hello * zenohId in info * cargo fmt * shm fix * no-std fix * internal doc test fix * renamed Inner to Proto * wrong return type fix * wrappers in zenoh-config --- commons/zenoh-codec/src/core/timestamp.rs | 4 +- commons/zenoh-codec/src/core/zenohid.rs | 36 +++--- commons/zenoh-codec/src/network/mod.rs | 4 +- commons/zenoh-codec/src/scouting/hello.rs | 22 ++-- commons/zenoh-codec/src/scouting/scout.rs | 4 +- commons/zenoh-codec/src/transport/init.rs | 6 +- commons/zenoh-codec/src/transport/join.rs | 4 +- commons/zenoh-codec/src/zenoh/mod.rs | 6 +- commons/zenoh-codec/tests/codec.rs | 6 +- commons/zenoh-config/src/lib.rs | 42 +----- commons/zenoh-config/src/mode_dependent.rs | 2 +- commons/zenoh-config/src/wrappers.rs | 120 ++++++++++++++++++ commons/zenoh-protocol/src/core/mod.rs | 56 ++++---- commons/zenoh-protocol/src/network/mod.rs | 8 +- commons/zenoh-protocol/src/scouting/hello.rs | 10 +- commons/zenoh-protocol/src/scouting/mod.rs | 10 +- commons/zenoh-protocol/src/scouting/scout.rs | 6 +- commons/zenoh-protocol/src/transport/init.rs | 10 +- commons/zenoh-protocol/src/transport/join.rs | 6 +- commons/zenoh-protocol/src/zenoh/del.rs | 4 +- commons/zenoh-protocol/src/zenoh/mod.rs | 6 +- commons/zenoh-protocol/src/zenoh/put.rs | 4 +- examples/examples/z_info.rs | 2 +- io/zenoh-transport/src/lib.rs | 4 +- io/zenoh-transport/src/manager.rs | 16 +-- io/zenoh-transport/src/multicast/link.rs | 4 +- io/zenoh-transport/src/multicast/manager.rs | 4 +- io/zenoh-transport/src/multicast/transport.rs | 4 +- .../src/unicast/establishment/accept.rs | 14 +- .../src/unicast/establishment/cookie.rs | 8 +- .../src/unicast/establishment/mod.rs | 8 +- .../src/unicast/establishment/open.rs | 10 +- .../src/unicast/lowlatency/transport.rs | 4 +- io/zenoh-transport/src/unicast/manager.rs 
| 10 +- io/zenoh-transport/src/unicast/mod.rs | 6 +- .../src/unicast/transport_unicast_inner.rs | 4 +- .../src/unicast/universal/transport.rs | 4 +- io/zenoh-transport/tests/endpoints.rs | 4 +- .../tests/multicast_compression.rs | 7 +- .../tests/multicast_transport.rs | 7 +- .../tests/transport_whitelist.rs | 4 +- .../tests/unicast_authenticator.rs | 16 +-- .../tests/unicast_compression.rs | 7 +- .../tests/unicast_concurrent.rs | 6 +- .../tests/unicast_defragmentation.rs | 7 +- .../tests/unicast_intermittent.rs | 10 +- io/zenoh-transport/tests/unicast_multilink.rs | 8 +- io/zenoh-transport/tests/unicast_openclose.rs | 8 +- .../tests/unicast_priorities.rs | 6 +- io/zenoh-transport/tests/unicast_shm.rs | 8 +- .../tests/unicast_simultaneous.rs | 10 +- io/zenoh-transport/tests/unicast_time.rs | 6 +- io/zenoh-transport/tests/unicast_transport.rs | 7 +- .../src/replica/snapshotter.rs | 2 +- zenoh/src/api/info.rs | 2 +- zenoh/src/api/publisher.rs | 7 +- zenoh/src/api/query.rs | 6 +- zenoh/src/api/queryable.rs | 10 +- zenoh/src/api/sample.rs | 12 +- zenoh/src/api/scouting.rs | 33 +---- zenoh/src/api/session.rs | 8 +- zenoh/src/api/subscriber.rs | 6 +- zenoh/src/lib.rs | 7 +- zenoh/src/net/codec/linkstate.rs | 4 +- zenoh/src/net/protocol/linkstate.rs | 6 +- zenoh/src/net/routing/dispatcher/face.rs | 6 +- zenoh/src/net/routing/dispatcher/tables.rs | 8 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 10 +- .../net/routing/hat/linkstate_peer/network.rs | 38 +++--- .../net/routing/hat/linkstate_peer/pubsub.rs | 22 ++-- .../net/routing/hat/linkstate_peer/queries.rs | 20 +-- zenoh/src/net/routing/hat/mod.rs | 8 +- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 26 ++-- zenoh/src/net/routing/hat/router/mod.rs | 30 ++--- zenoh/src/net/routing/hat/router/network.rs | 44 ++++--- zenoh/src/net/routing/hat/router/pubsub.rs | 42 +++--- zenoh/src/net/routing/hat/router/queries.rs | 40 +++--- .../net/routing/interceptor/access_control.rs | 12 +- zenoh/src/net/routing/router.rs | 6 +- 
zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/net/runtime/mod.rs | 2 +- zenoh/src/net/runtime/orchestrator.rs | 22 ++-- zenoh/src/net/tests/tables.rs | 13 +- 83 files changed, 575 insertions(+), 478 deletions(-) create mode 100644 commons/zenoh-config/src/wrappers.rs diff --git a/commons/zenoh-codec/src/core/timestamp.rs b/commons/zenoh-codec/src/core/timestamp.rs index 025f8f8bf5..3ec059ae1b 100644 --- a/commons/zenoh-codec/src/core/timestamp.rs +++ b/commons/zenoh-codec/src/core/timestamp.rs @@ -17,7 +17,7 @@ use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::{Timestamp, ZenohId}; +use zenoh_protocol::core::{Timestamp, ZenohIdProto}; use crate::{LCodec, RCodec, WCodec, Zenoh080}; @@ -53,7 +53,7 @@ where if size > (uhlc::ID::MAX_SIZE) { return Err(DidntRead); } - let mut id = [0_u8; ZenohId::MAX_SIZE]; + let mut id = [0_u8; ZenohIdProto::MAX_SIZE]; reader.read_exact(&mut id[..size])?; let time = uhlc::NTP64(time); diff --git a/commons/zenoh-codec/src/core/zenohid.rs b/commons/zenoh-codec/src/core/zenohid.rs index 5098cad534..4ea06f4887 100644 --- a/commons/zenoh-codec/src/core/zenohid.rs +++ b/commons/zenoh-codec/src/core/zenohid.rs @@ -17,70 +17,70 @@ use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::ZenohId; +use zenoh_protocol::core::ZenohIdProto; use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; -impl LCodec<&ZenohId> for Zenoh080 { - fn w_len(self, x: &ZenohId) -> usize { +impl LCodec<&ZenohIdProto> for Zenoh080 { + fn w_len(self, x: &ZenohIdProto) -> usize { x.size() } } -impl WCodec<&ZenohId, &mut W> for Zenoh080 +impl WCodec<&ZenohIdProto, &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &ZenohId) -> Self::Output { + fn write(self, writer: &mut W, x: &ZenohIdProto) -> Self::Output { self.write(&mut *writer, &x.to_le_bytes()[..x.size()]) } } -impl RCodec 
for Zenoh080 +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { let size: usize = self.read(&mut *reader)?; - if size > ZenohId::MAX_SIZE { + if size > ZenohIdProto::MAX_SIZE { return Err(DidntRead); } - let mut id = [0; ZenohId::MAX_SIZE]; + let mut id = [0; ZenohIdProto::MAX_SIZE]; reader.read_exact(&mut id[..size])?; - ZenohId::try_from(&id[..size]).map_err(|_| DidntRead) + ZenohIdProto::try_from(&id[..size]).map_err(|_| DidntRead) } } -impl WCodec<&ZenohId, &mut W> for Zenoh080Length +impl WCodec<&ZenohIdProto, &mut W> for Zenoh080Length where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &ZenohId) -> Self::Output { - if self.length > ZenohId::MAX_SIZE { + fn write(self, writer: &mut W, x: &ZenohIdProto) -> Self::Output { + if self.length > ZenohIdProto::MAX_SIZE { return Err(DidntWrite); } writer.write_exact(&x.to_le_bytes()[..x.size()]) } } -impl RCodec for Zenoh080Length +impl RCodec for Zenoh080Length where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { - if self.length > ZenohId::MAX_SIZE { + fn read(self, reader: &mut R) -> Result { + if self.length > ZenohIdProto::MAX_SIZE { return Err(DidntRead); } - let mut id = [0; ZenohId::MAX_SIZE]; + let mut id = [0; ZenohIdProto::MAX_SIZE]; reader.read_exact(&mut id[..self.length])?; - ZenohId::try_from(&id[..self.length]).map_err(|_| DidntRead) + ZenohIdProto::try_from(&id[..self.length]).map_err(|_| DidntRead) } } diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index fe9d254ee8..c68a3470aa 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -24,7 +24,7 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{imsg, ZExtZ64, ZExtZBufHeader}, - core::{EntityId, Reliability, ZenohId}, + core::{EntityId, Reliability, ZenohIdProto}, 
network::{ext::EntityGlobalIdType, *}, }; @@ -265,7 +265,7 @@ where let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let eid: EntityId = self.codec.read(&mut *reader)?; diff --git a/commons/zenoh-codec/src/scouting/hello.rs b/commons/zenoh-codec/src/scouting/hello.rs index c3aff83667..770519855b 100644 --- a/commons/zenoh-codec/src/scouting/hello.rs +++ b/commons/zenoh-codec/src/scouting/hello.rs @@ -19,23 +19,23 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{imsg, ZExtUnknown}, - core::{Locator, WhatAmI, ZenohId}, + core::{Locator, WhatAmI, ZenohIdProto}, scouting::{ - hello::{flag, Hello}, + hello::{flag, HelloProto}, id, }, }; use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; -impl WCodec<&Hello, &mut W> for Zenoh080 +impl WCodec<&HelloProto, &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &Hello) -> Self::Output { - let Hello { + fn write(self, writer: &mut W, x: &HelloProto) -> Self::Output { + let HelloProto { version, whatami, zid, @@ -73,26 +73,26 @@ where } } -impl RCodec for Zenoh080 +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { let header: u8 = self.read(&mut *reader)?; let codec = Zenoh080Header::new(header); codec.read(reader) } } -impl RCodec for Zenoh080Header +impl RCodec for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { if imsg::mid(self.header) != id::HELLO { return Err(DidntRead); } @@ -108,7 +108,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let locators = if 
imsg::has_flag(self.header, flag::L) { let locs: Vec = self.codec.read(&mut *reader)?; @@ -124,7 +124,7 @@ where has_extensions = more; } - Ok(Hello { + Ok(HelloProto { version, zid, whatami, diff --git a/commons/zenoh-codec/src/scouting/scout.rs b/commons/zenoh-codec/src/scouting/scout.rs index 888ce2954f..f4863e69b8 100644 --- a/commons/zenoh-codec/src/scouting/scout.rs +++ b/commons/zenoh-codec/src/scouting/scout.rs @@ -19,7 +19,7 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{imsg, ZExtUnknown}, - core::{whatami::WhatAmIMatcher, ZenohId}, + core::{whatami::WhatAmIMatcher, ZenohIdProto}, scouting::{ id, scout::{flag, Scout}, @@ -93,7 +93,7 @@ where let zid = if imsg::has_flag(flags, flag::I) { let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; Some(zid) } else { None diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index 55e129799c..c559fdbd51 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -18,7 +18,7 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{iext, imsg}, - core::{Resolution, WhatAmI, ZenohId}, + core::{Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, id, init::{ext, flag, InitAck, InitSyn}, @@ -160,7 +160,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let mut resolution = Resolution::default(); let mut batch_size = batch_size::UNICAST.to_le_bytes(); @@ -373,7 +373,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let mut resolution = Resolution::default(); let mut batch_size = 
batch_size::UNICAST.to_le_bytes(); diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index 896d7f6290..3f70d2ec8b 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -20,7 +20,7 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{iext, imsg, ZExtZBufHeader}, - core::{Priority, Resolution, WhatAmI, ZenohId}, + core::{Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, id, join::{ext, flag, Join}, @@ -242,7 +242,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let mut resolution = Resolution::default(); let mut batch_size = batch_size::MULTICAST.to_le_bytes(); diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index aeb8f53102..3c8170adea 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -26,7 +26,7 @@ use zenoh_buffers::{ use zenoh_protocol::common::{iext, ZExtUnit}; use zenoh_protocol::{ common::{imsg, ZExtZBufHeader}, - core::{Encoding, EntityGlobalId, EntityId, ZenohId}, + core::{Encoding, EntityGlobalIdProto, EntityId, ZenohIdProto}, zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; @@ -186,14 +186,14 @@ where let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let eid: EntityId = self.codec.read(&mut *reader)?; let sn: u32 = self.codec.read(&mut *reader)?; Ok(( ext::SourceInfoType { - id: EntityGlobalId { zid, eid }, + id: EntityGlobalIdProto { zid, eid }, sn, }, more, diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index c2cc71ea17..1e1bbe18a3 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs 
@@ -272,7 +272,7 @@ fn codec_string_bounded() { #[test] fn codec_zid() { - run!(ZenohId, ZenohId::default()); + run!(ZenohIdProto, ZenohIdProto::default()); } #[test] @@ -348,7 +348,7 @@ fn codec_locator() { fn codec_timestamp() { run!(Timestamp, { let time = uhlc::NTP64(thread_rng().gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); Timestamp::new(time, id) }); } @@ -447,7 +447,7 @@ fn codec_scout() { #[test] fn codec_hello() { - run!(Hello, Hello::rand()); + run!(HelloProto, HelloProto::rand()); } #[test] diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 07112b2c5f..150487791c 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -15,6 +15,7 @@ //! Configuration to pass to `zenoh::open()` and `zenoh::scout()` functions and associated constants. pub mod defaults; mod include; +pub mod wrappers; #[allow(unused_imports)] use std::convert::TryFrom; // This is a false positive from the rust analyser @@ -25,7 +26,6 @@ use std::{ io::Read, net::SocketAddr, path::Path, - str::FromStr, sync::{Arc, Mutex, MutexGuard, Weak}, }; @@ -35,6 +35,7 @@ use serde::{Deserialize, Serialize}; use serde_json::{Map, Value}; use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; +use wrappers::ZenohId; use zenoh_core::zlock; pub use zenoh_protocol::core::{ whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, @@ -52,45 +53,6 @@ pub use mode_dependent::*; pub mod connection_retry; pub use connection_retry::*; -/// The global unique id of a zenoh peer. 
-#[derive( - Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default, -)] -#[repr(transparent)] -pub struct ZenohId(zenoh_protocol::core::ZenohId); - -impl fmt::Display for ZenohId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -impl From for ZenohId { - fn from(id: zenoh_protocol::core::ZenohId) -> Self { - Self(id) - } -} - -impl From for zenoh_protocol::core::ZenohId { - fn from(id: ZenohId) -> Self { - id.0 - } -} - -impl From for uhlc::ID { - fn from(zid: ZenohId) -> Self { - zid.0.into() - } -} - -impl FromStr for ZenohId { - type Err = zenoh_result::Error; - - fn from_str(s: &str) -> Result { - zenoh_protocol::core::ZenohId::from_str(s).map(|zid| zid.into()) - } -} - // Wrappers for secrecy of values #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] pub struct SecretString(String); diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 074dd823d9..20bcb3481b 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -19,7 +19,7 @@ use serde::{ Deserialize, Serialize, }; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohIdProto, }; pub trait ModeDependent { diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs new file mode 100644 index 0000000000..bf8570de35 --- /dev/null +++ b/commons/zenoh-config/src/wrappers.rs @@ -0,0 +1,120 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Wrappers around types reexported by `zenoh` from subcrates. +//! These wrappers are used to avoid exposing the the API necessary only for zenoh internals into the public API. + +use core::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; +use zenoh_protocol::{ + core::{EntityGlobalIdProto, EntityId, Locator, WhatAmI, ZenohIdProto}, + scouting::HelloProto, +}; + +/// The global unique id of a zenoh peer. +#[derive( + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default, +)] +#[repr(transparent)] +pub struct ZenohId(ZenohIdProto); + +impl fmt::Display for ZenohId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl From for ZenohId { + fn from(id: ZenohIdProto) -> Self { + Self(id) + } +} + +impl From for ZenohIdProto { + fn from(id: ZenohId) -> Self { + id.0 + } +} + +impl From for uhlc::ID { + fn from(zid: ZenohId) -> Self { + zid.0.into() + } +} + +impl FromStr for ZenohId { + type Err = zenoh_result::Error; + + fn from_str(s: &str) -> Result { + ZenohIdProto::from_str(s).map(|zid| zid.into()) + } +} + +/// A zenoh Hello message. +pub struct Hello(HelloProto); + +impl Hello { + /// Get the locators of this Hello message. + pub fn locators(&self) -> &[Locator] { + &self.0.locators + } + + /// Get the zenoh id of this Hello message. + pub fn zid(&self) -> ZenohIdProto { + self.0.zid + } + + /// Get the whatami of this Hello message. 
+ pub fn whatami(&self) -> WhatAmI { + self.0.whatami + } +} + +impl From for Hello { + fn from(inner: HelloProto) -> Self { + Hello(inner) + } +} + +impl fmt::Display for Hello { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Hello") + .field("zid", &self.zid()) + .field("whatami", &self.whatami()) + .field("locators", &self.locators()) + .finish() + } +} + +#[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] +#[repr(transparent)] +pub struct EntityGlobalId(EntityGlobalIdProto); + +impl EntityGlobalId { + pub fn zid(&self) -> ZenohId { + self.0.zid.into() + } + + pub fn eid(&self) -> EntityId { + self.0.eid + } +} + +impl From for EntityGlobalId { + fn from(id: EntityGlobalIdProto) -> Self { + Self(id) + } +} diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 9c8eee58a1..71a3659d8d 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -59,12 +59,11 @@ pub use parameters::*; pub mod properties; pub use properties::*; -/// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] -pub struct ZenohId(uhlc::ID); +pub struct ZenohIdProto(uhlc::ID); -impl ZenohId { +impl ZenohIdProto { pub const MAX_SIZE: usize = 16; #[inline] @@ -77,8 +76,8 @@ impl ZenohId { self.0.to_le_bytes() } - pub fn rand() -> ZenohId { - ZenohId(uhlc::ID::rand()) + pub fn rand() -> ZenohIdProto { + ZenohIdProto(uhlc::ID::rand()) } pub fn into_keyexpr(self) -> OwnedKeyExpr { @@ -86,7 +85,7 @@ impl ZenohId { } } -impl Default for ZenohId { +impl Default for ZenohIdProto { fn default() -> Self { Self::rand() } @@ -121,7 +120,7 @@ impl fmt::Display for SizeError { macro_rules! 
derive_tryfrom { ($T: ty) => { - impl TryFrom<$T> for ZenohId { + impl TryFrom<$T> for ZenohIdProto { type Error = zenoh_result::Error; fn try_from(val: $T) -> Result { match val.try_into() { @@ -166,7 +165,7 @@ derive_tryfrom!([u8; 16]); derive_tryfrom!(&[u8; 16]); derive_tryfrom!(&[u8]); -impl FromStr for ZenohId { +impl FromStr for ZenohIdProto { type Err = zenoh_result::Error; fn from_str(s: &str) -> Result { @@ -179,37 +178,37 @@ impl FromStr for ZenohId { let u: uhlc::ID = s .parse() .map_err(|e: uhlc::ParseIDError| zerror!("Invalid id: {} - {}", s, e.cause))?; - Ok(ZenohId(u)) + Ok(ZenohIdProto(u)) } } -impl fmt::Debug for ZenohId { +impl fmt::Debug for ZenohIdProto { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } -impl fmt::Display for ZenohId { +impl fmt::Display for ZenohIdProto { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self, f) } } // A PeerID can be converted into a Timestamp's ID -impl From<&ZenohId> for uhlc::ID { - fn from(zid: &ZenohId) -> Self { +impl From<&ZenohIdProto> for uhlc::ID { + fn from(zid: &ZenohIdProto) -> Self { zid.0 } } -impl From for uhlc::ID { - fn from(zid: ZenohId) -> Self { +impl From for uhlc::ID { + fn from(zid: ZenohIdProto) -> Self { zid.0 } } -impl From for OwnedKeyExpr { - fn from(zid: ZenohId) -> Self { +impl From for OwnedKeyExpr { + fn from(zid: ZenohIdProto) -> Self { // SAFETY: zid.to_string() returns an stringified hexadecimal // representation of the zid. 
Therefore, building a OwnedKeyExpr // by calling from_string_unchecked() is safe because it is @@ -218,13 +217,13 @@ impl From for OwnedKeyExpr { } } -impl From<&ZenohId> for OwnedKeyExpr { - fn from(zid: &ZenohId) -> Self { +impl From<&ZenohIdProto> for OwnedKeyExpr { + fn from(zid: &ZenohIdProto) -> Self { (*zid).into() } } -impl serde::Serialize for ZenohId { +impl serde::Serialize for ZenohIdProto { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -233,7 +232,7 @@ impl serde::Serialize for ZenohId { } } -impl<'de> serde::Deserialize<'de> for ZenohId { +impl<'de> serde::Deserialize<'de> for ZenohIdProto { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -241,10 +240,13 @@ impl<'de> serde::Deserialize<'de> for ZenohId { struct ZenohIdVisitor; impl<'de> serde::de::Visitor<'de> for ZenohIdVisitor { - type Value = ZenohId; + type Value = ZenohIdProto; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str(&format!("An hex string of 1-{} bytes", ZenohId::MAX_SIZE)) + formatter.write_str(&format!( + "An hex string of 1-{} bytes", + ZenohIdProto::MAX_SIZE + )) } fn visit_str(self, v: &str) -> Result @@ -278,17 +280,17 @@ pub type EntityId = u32; /// The global unique id of a zenoh entity. 
#[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] -pub struct EntityGlobalId { - pub zid: ZenohId, +pub struct EntityGlobalIdProto { + pub zid: ZenohIdProto, pub eid: EntityId, } -impl EntityGlobalId { +impl EntityGlobalIdProto { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; Self { - zid: ZenohId::rand(), + zid: ZenohIdProto::rand(), eid: rand::thread_rng().gen(), } } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 371f3eda78..b9f3076581 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -221,7 +221,7 @@ pub mod ext { use crate::{ common::{imsg, ZExtZ64}, - core::{CongestionControl, EntityId, Priority, ZenohId}, + core::{CongestionControl, EntityId, Priority, ZenohIdProto}, }; /// ```text @@ -366,7 +366,7 @@ pub mod ext { let mut rng = rand::thread_rng(); let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); let timestamp = uhlc::Timestamp::new(time, id); Self { timestamp } } @@ -428,7 +428,7 @@ pub mod ext { /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] pub struct EntityGlobalIdType { - pub zid: ZenohId, + pub zid: ZenohIdProto, pub eid: EntityId, } @@ -438,7 +438,7 @@ pub mod ext { use rand::Rng; let mut rng = rand::thread_rng(); - let zid = ZenohId::rand(); + let zid = ZenohIdProto::rand(); let eid: EntityId = rng.gen(); Self { zid, eid } } diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 6639792976..61c7db4ce6 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -13,7 +13,7 @@ // use alloc::vec::Vec; -use crate::core::{Locator, WhatAmI, ZenohId}; +use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// # Hello message /// @@ -99,14 +99,14 @@ pub mod flag { } 
#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Hello { +pub struct HelloProto { pub version: u8, pub whatami: WhatAmI, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub locators: Vec, } -impl Hello { +impl HelloProto { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -114,7 +114,7 @@ impl Hello { let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); - let zid = ZenohId::default(); + let zid = ZenohIdProto::default(); let whatami = WhatAmI::rand(); let locators = if rng.gen_bool(0.5) { Vec::from_iter((1..5).map(|_| Locator::rand())) diff --git a/commons/zenoh-protocol/src/scouting/mod.rs b/commons/zenoh-protocol/src/scouting/mod.rs index 9e7fd27c2d..2cb5d1c0f0 100644 --- a/commons/zenoh-protocol/src/scouting/mod.rs +++ b/commons/zenoh-protocol/src/scouting/mod.rs @@ -14,7 +14,7 @@ pub mod hello; pub mod scout; -pub use hello::Hello; +pub use hello::HelloProto; pub use scout::Scout; pub mod id { @@ -27,7 +27,7 @@ pub mod id { #[derive(Debug, Clone, PartialEq, Eq)] pub enum ScoutingBody { Scout(Scout), - Hello(Hello), + Hello(HelloProto), } #[derive(Debug, Clone, PartialEq, Eq)] @@ -46,7 +46,7 @@ impl ScoutingMessage { match rng.gen_range(0..2) { 0 => ScoutingBody::Scout(Scout::rand()), - 1 => ScoutingBody::Hello(Hello::rand()), + 1 => ScoutingBody::Hello(HelloProto::rand()), _ => unreachable!(), } .into() @@ -69,8 +69,8 @@ impl From for ScoutingMessage { } } -impl From for ScoutingMessage { - fn from(hello: Hello) -> Self { +impl From for ScoutingMessage { + fn from(hello: HelloProto) -> Self { ScoutingBody::Hello(hello).into() } } diff --git a/commons/zenoh-protocol/src/scouting/scout.rs b/commons/zenoh-protocol/src/scouting/scout.rs index b7a51642df..6d2b49f335 100644 --- a/commons/zenoh-protocol/src/scouting/scout.rs +++ b/commons/zenoh-protocol/src/scouting/scout.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::{whatami::WhatAmIMatcher, ZenohId}; +use crate::core::{whatami::WhatAmIMatcher, 
ZenohIdProto}; /// # Scout message /// @@ -75,7 +75,7 @@ pub mod flag { pub struct Scout { pub version: u8, pub what: WhatAmIMatcher, - pub zid: Option, + pub zid: Option, } impl Scout { @@ -87,7 +87,7 @@ impl Scout { let version: u8 = rng.gen(); let what = WhatAmIMatcher::rand(); - let zid = rng.gen_bool(0.5).then_some(ZenohId::rand()); + let zid = rng.gen_bool(0.5).then_some(ZenohIdProto::rand()); Self { version, what, zid } } } diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index b1febac4b5..7e56bfd770 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -14,7 +14,7 @@ use zenoh_buffers::ZSlice; use crate::{ - core::{Resolution, WhatAmI, ZenohId}, + core::{Resolution, WhatAmI, ZenohIdProto}, transport::BatchSize, }; @@ -111,7 +111,7 @@ pub mod flag { pub struct InitSyn { pub version: u8, pub whatami: WhatAmI, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub resolution: Resolution, pub batch_size: BatchSize, pub ext_qos: Option, @@ -167,7 +167,7 @@ impl InitSyn { let version: u8 = rng.gen(); let whatami = WhatAmI::rand(); - let zid = ZenohId::default(); + let zid = ZenohIdProto::default(); let resolution = Resolution::rand(); let batch_size: BatchSize = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); @@ -199,7 +199,7 @@ impl InitSyn { pub struct InitAck { pub version: u8, pub whatami: WhatAmI, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub resolution: Resolution, pub batch_size: BatchSize, pub cookie: ZSlice, @@ -223,7 +223,7 @@ impl InitAck { let version: u8 = rng.gen(); let whatami = WhatAmI::rand(); - let zid = ZenohId::default(); + let zid = ZenohIdProto::default(); let resolution = if rng.gen_bool(0.5) { Resolution::default() } else { diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index 9918de6acf..e1e3f97c33 100644 --- 
a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -14,7 +14,7 @@ use core::time::Duration; use crate::{ - core::{Priority, Resolution, WhatAmI, ZenohId}, + core::{Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{BatchSize, PrioritySn}, }; @@ -105,7 +105,7 @@ pub mod flag { pub struct Join { pub version: u8, pub whatami: WhatAmI, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub resolution: Resolution, pub batch_size: BatchSize, pub lease: Duration, @@ -142,7 +142,7 @@ impl Join { let version: u8 = rng.gen(); let whatami = WhatAmI::rand(); - let zid = ZenohId::default(); + let zid = ZenohIdProto::default(); let resolution = Resolution::rand(); let batch_size: BatchSize = rng.gen(); let lease = if rng.gen_bool(0.5) { diff --git a/commons/zenoh-protocol/src/zenoh/del.rs b/commons/zenoh-protocol/src/zenoh/del.rs index 4723cd5415..d4c6b8b3ac 100644 --- a/commons/zenoh-protocol/src/zenoh/del.rs +++ b/commons/zenoh-protocol/src/zenoh/del.rs @@ -66,12 +66,12 @@ impl Del { pub fn rand() -> Self { use rand::Rng; - use crate::{common::iext, core::ZenohId}; + use crate::{common::iext, core::ZenohIdProto}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); Timestamp::new(time, id) }); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 9dcb2e52ac..eeb1a63c1d 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -136,7 +136,7 @@ impl From for ResponseBody { pub mod ext { use zenoh_buffers::ZBuf; - use crate::core::{Encoding, EntityGlobalId}; + use crate::core::{Encoding, EntityGlobalIdProto}; /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ @@ -150,7 
+150,7 @@ pub mod ext { /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { - pub id: EntityGlobalId, + pub id: EntityGlobalIdProto, pub sn: u32, } @@ -160,7 +160,7 @@ pub mod ext { use rand::Rng; let mut rng = rand::thread_rng(); - let id = EntityGlobalId::rand(); + let id = EntityGlobalIdProto::rand(); let sn: u32 = rng.gen(); Self { id, sn } } diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index ef0a71db09..91a0a8f50b 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -84,12 +84,12 @@ impl Put { pub fn rand() -> Self { use rand::Rng; - use crate::{common::iext, core::ZenohId}; + use crate::{common::iext, core::ZenohIdProto}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); Timestamp::new(time, id) }); let encoding = Encoding::rand(); diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 9d3f1bb223..d2e4bfdbc0 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{config::ZenohId, prelude::*}; +use zenoh::{info::ZenohId, prelude::*}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index f4c135c9d6..bfdd79685d 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -34,7 +34,7 @@ pub use manager::*; use serde::Serialize; use zenoh_link::Link; use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, + core::{WhatAmI, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; @@ -108,7 +108,7 @@ impl TransportMulticastEventHandler for DummyTransportMulticastEventHandler { #[derive(Clone, Debug, 
Serialize, PartialEq, Eq)] #[serde(rename = "Transport")] pub struct TransportPeer { - pub zid: ZenohId, + pub zid: ZenohIdProto, pub whatami: WhatAmI, pub is_qos: bool, #[serde(skip)] diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index f578e4d4fa..deba9cf6f6 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -19,7 +19,7 @@ use zenoh_config::{Config, LinkRxConf, QueueConf, QueueSizeConf}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_link::NewLinkChannelSender; use zenoh_protocol::{ - core::{EndPoint, Field, Locator, Priority, Resolution, WhatAmI, ZenohId}, + core::{EndPoint, Field, Locator, Priority, Resolution, WhatAmI, ZenohIdProto}, transport::BatchSize, VERSION, }; @@ -45,7 +45,7 @@ use crate::multicast::manager::{ /// ``` /// use std::sync::Arc; /// use std::time::Duration; -/// use zenoh_protocol::core::{ZenohId, Resolution, Field, Bits, WhatAmI, whatami}; +/// use zenoh_protocol::core::{ZenohIdProto, Resolution, Field, Bits, WhatAmI, whatami}; /// use zenoh_transport::*; /// use zenoh_result::ZResult; /// @@ -85,7 +85,7 @@ use crate::multicast::manager::{ /// let mut resolution = Resolution::default(); /// resolution.set(Field::FrameSN, Bits::U8); /// let manager = TransportManager::builder() -/// .zid(ZenohId::rand()) +/// .zid(ZenohIdProto::rand().into()) /// .whatami(WhatAmI::Peer) /// .batch_size(1_024) // Use a batch size of 1024 bytes /// .resolution(resolution) // Use a sequence number resolution of 128 @@ -96,7 +96,7 @@ use crate::multicast::manager::{ pub struct TransportManagerConfig { pub version: u8, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub whatami: WhatAmI, pub resolution: Resolution, pub batch_size: BatchSize, @@ -125,7 +125,7 @@ pub struct TransportManagerParams { pub struct TransportManagerBuilder { version: u8, - zid: ZenohId, + zid: ZenohIdProto, whatami: WhatAmI, resolution: Resolution, batch_size: BatchSize, @@ -150,7 +150,7 @@ impl 
TransportManagerBuilder { self } - pub fn zid(mut self, zid: ZenohId) -> Self { + pub fn zid(mut self, zid: ZenohIdProto) -> Self { self.zid = zid; self } @@ -335,7 +335,7 @@ impl Default for TransportManagerBuilder { let wait_before_drop = *queue.congestion_control().wait_before_drop(); Self { version: VERSION, - zid: ZenohId::rand(), + zid: ZenohIdProto::rand(), whatami: zenoh_config::defaults::mode, resolution: Resolution::default(), batch_size: BatchSize::MAX, @@ -424,7 +424,7 @@ impl TransportManager { TransportManagerBuilder::default() } - pub fn zid(&self) -> ZenohId { + pub fn zid(&self) -> ZenohIdProto { self.config.zid } diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index a1c9c2bae8..382109be0b 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -23,7 +23,7 @@ use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::{zcondfeat, zlock}; use zenoh_link::{Link, LinkMulticast, Locator}; use zenoh_protocol::{ - core::{Bits, Priority, Resolution, WhatAmI, ZenohId}, + core::{Bits, Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{BatchSize, Close, Join, PrioritySn, TransportMessage, TransportSn}, }; use zenoh_result::{zerror, ZResult}; @@ -251,7 +251,7 @@ impl fmt::Debug for TransportLinkMulticastRx { /**************************************/ pub(super) struct TransportLinkMulticastConfigUniversal { pub(super) version: u8, - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: WhatAmI, pub(super) lease: Duration, pub(super) join_interval: Duration, diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index d95a5ff8c5..fed0194b77 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -22,7 +22,7 @@ use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; use zenoh_protocol::{ - 
core::{Parameters, ZenohId}, + core::{Parameters, ZenohIdProto}, transport::close, }; use zenoh_result::{bail, zerror, ZResult}; @@ -266,7 +266,7 @@ impl TransportManager { super::establishment::open_link(self, link).await } - pub async fn get_transport_multicast(&self, zid: &ZenohId) -> Option { + pub async fn get_transport_multicast(&self, zid: &ZenohIdProto) -> Option { for t in zasynclock!(self.state.multicast.transports).values() { if t.get_peers().iter().any(|p| p.zid == *zid) { return Some(t.into()); diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index a60ed180ee..f0dfec4813 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -24,7 +24,7 @@ use tokio_util::sync::CancellationToken; use zenoh_core::{zcondfeat, zread, zwrite}; use zenoh_link::{Link, Locator}; use zenoh_protocol::{ - core::{Bits, Field, Priority, Resolution, WhatAmI, ZenohId}, + core::{Bits, Field, Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{batch_size, close, Close, Join, TransportMessage}, }; use zenoh_result::{bail, ZResult}; @@ -53,7 +53,7 @@ use crate::{ pub(super) struct TransportMulticastPeer { pub(super) version: u8, pub(super) locator: Locator, - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: WhatAmI, pub(super) resolution: Resolution, pub(super) lease: Duration, diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index d074ea9642..ed4890d7d2 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -22,7 +22,7 @@ use zenoh_core::{zasynclock, zcondfeat, zerror}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_link::LinkUnicast; use zenoh_protocol::{ - core::{Field, Resolution, WhatAmI, ZenohId}, + core::{Field, Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, 
close::{self, Close}, @@ -80,7 +80,7 @@ struct RecvInitSynIn { mine_version: u8, } struct RecvInitSynOut { - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] ext_shm: Option, @@ -89,9 +89,9 @@ struct RecvInitSynOut { // InitAck struct SendInitAckIn { mine_version: u8, - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_whatami: WhatAmI, - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] ext_shm: Option, @@ -107,7 +107,7 @@ struct RecvOpenSynIn { cookie_nonce: u64, } struct RecvOpenSynOut { - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, other_lease: Duration, other_initial_sn: TransportSn, @@ -115,9 +115,9 @@ struct RecvOpenSynOut { // OpenAck struct SendOpenAckIn { - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_lease: Duration, - other_zid: ZenohId, + other_zid: ZenohIdProto, } struct SendOpenAckOut { open_ack: OpenAck, diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index fccce5e672..4220f8e08b 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -20,7 +20,7 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_protocol::{ - core::{Resolution, WhatAmI, ZenohId}, + core::{Resolution, WhatAmI, ZenohIdProto}, transport::BatchSize, }; @@ -28,7 +28,7 @@ use crate::unicast::establishment::ext; #[derive(Debug, PartialEq)] pub(crate) struct Cookie { - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) whatami: WhatAmI, pub(crate) resolution: Resolution, pub(crate) batch_size: BatchSize, @@ -82,7 +82,7 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let zid: ZenohId = self.read(&mut *reader)?; + let zid: ZenohIdProto = self.read(&mut *reader)?; let wai: u8 = 
self.read(&mut *reader)?; let whatami = WhatAmI::try_from(wai).map_err(|_| DidntRead)?; let resolution: u8 = self.read(&mut *reader)?; @@ -173,7 +173,7 @@ impl Cookie { let mut rng = rand::thread_rng(); Self { - zid: ZenohId::default(), + zid: ZenohIdProto::default(), whatami: WhatAmI::rand(), resolution: Resolution::rand(), batch_size: rng.gen(), diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index 79627f4c49..ca46b40ed1 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -23,7 +23,7 @@ use sha3::{ Shake128, }; use zenoh_protocol::{ - core::{Field, Resolution, ZenohId}, + core::{Field, Resolution, ZenohIdProto}, transport::TransportSn, }; @@ -101,7 +101,11 @@ pub trait AcceptFsm { /*************************************/ /* FUNCTIONS */ /*************************************/ -pub(super) fn compute_sn(zid1: ZenohId, zid2: ZenohId, resolution: Resolution) -> TransportSn { +pub(super) fn compute_sn( + zid1: ZenohIdProto, + zid2: ZenohIdProto, + resolution: Resolution, +) -> TransportSn { // Create a random yet deterministic initial_sn. // In case of multilink it's important that the same initial_sn is used for every connection attempt. // Instead of storing the state everywhere, we make sure that the we always compute the same initial_sn. 
diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 49c57d9e9a..2857d1cc75 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -20,7 +20,7 @@ use zenoh_core::zasynclock; use zenoh_core::{zcondfeat, zerror}; use zenoh_link::LinkUnicast; use zenoh_protocol::{ - core::{Field, Resolution, WhatAmI, ZenohId}, + core::{Field, Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, close, BatchSize, Close, InitSyn, OpenSyn, TransportBody, TransportMessage, TransportSn, @@ -75,13 +75,13 @@ struct State { // InitSyn struct SendInitSynIn { mine_version: u8, - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_whatami: WhatAmI, } // InitAck struct RecvInitAckOut { - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] @@ -90,9 +90,9 @@ struct RecvInitAckOut { // OpenSyn struct SendOpenSynIn { - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_lease: Duration, - other_zid: ZenohId, + other_zid: ZenohIdProto, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] ext_shm: Option, diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 9c46b55174..749c5507aa 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -22,7 +22,7 @@ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; use zenoh_link::Link; use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, + core::{WhatAmI, ZenohIdProto}, network::NetworkMessage, transport::{close, Close, TransportBodyLowLatency, TransportMessageLowLatency, TransportSn}, }; @@ -183,7 +183,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { vec![] } - fn get_zid(&self) -> ZenohId { + fn 
get_zid(&self) -> ZenohIdProto { self.config.zid } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index a9082ce705..52de702603 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -30,7 +30,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::{Parameters, ZenohId}, + core::{Parameters, ZenohIdProto}, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; @@ -79,7 +79,7 @@ pub struct TransportManagerStateUnicast { // Established listeners pub(super) protocols: Arc>>, // Established transports - pub(super) transports: Arc>>>, + pub(super) transports: Arc>>>, // Multilink #[cfg(feature = "transport_multilink")] pub(super) multilink: Arc, @@ -510,7 +510,7 @@ impl TransportManager { link: LinkUnicastWithOpenAck, other_initial_sn: TransportSn, other_lease: Duration, - mut guard: AsyncMutexGuard<'_, HashMap>>, + mut guard: AsyncMutexGuard<'_, HashMap>>, ) -> InitTransportResult { macro_rules! 
link_error { ($s:expr, $reason:expr) => { @@ -707,7 +707,7 @@ impl TransportManager { super::establishment::open::open_link(link, self).await } - pub async fn get_transport_unicast(&self, peer: &ZenohId) -> Option { + pub async fn get_transport_unicast(&self, peer: &ZenohIdProto) -> Option { zasynclock!(self.state.unicast.transports) .get(peer) .map(|t| TransportUnicast(Arc::downgrade(t))) @@ -720,7 +720,7 @@ impl TransportManager { .collect() } - pub(super) async fn del_transport_unicast(&self, peer: &ZenohId) -> ZResult<()> { + pub(super) async fn del_transport_unicast(&self, peer: &ZenohIdProto) -> ZResult<()> { zasynclock!(self.state.unicast.transports) .remove(peer) .ok_or_else(|| { diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 1726ba2559..04162de10a 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -32,7 +32,7 @@ pub use manager::*; use zenoh_core::zcondfeat; use zenoh_link::Link; use zenoh_protocol::{ - core::{Bits, WhatAmI, ZenohId}, + core::{Bits, WhatAmI, ZenohIdProto}, network::NetworkMessage, transport::{close, TransportSn}, }; @@ -48,7 +48,7 @@ use crate::shm::TransportShmConfig; /*************************************/ #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct TransportConfigUnicast { - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) whatami: WhatAmI, pub(crate) sn_resolution: Bits, pub(crate) tx_initial_sn: TransportSn, @@ -74,7 +74,7 @@ impl TransportUnicast { } #[inline(always)] - pub fn get_zid(&self) -> ZResult { + pub fn get_zid(&self) -> ZResult { let transport = self.get_inner()?; Ok(transport.get_zid()) } diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index c687a6aa16..c7821aac9c 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -18,7 +18,7 @@ 
use async_trait::async_trait; use tokio::sync::MutexGuard as AsyncMutexGuard; use zenoh_link::Link; use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, + core::{WhatAmI, ZenohIdProto}, network::NetworkMessage, transport::TransportSn, }; @@ -52,7 +52,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn set_callback(&self, callback: Arc); async fn get_alive(&self) -> AsyncMutexGuard<'_, bool>; - fn get_zid(&self) -> ZenohId; + fn get_zid(&self) -> ZenohIdProto; fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; fn get_links(&self) -> Vec; diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 538756f6ee..6a122d258e 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -22,7 +22,7 @@ use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; use zenoh_link::Link; use zenoh_protocol::{ - core::{Priority, WhatAmI, ZenohId}, + core::{Priority, WhatAmI, ZenohIdProto}, network::NetworkMessage, transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, }; @@ -320,7 +320,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { zasynclock!(self.alive) } - fn get_zid(&self) -> ZenohId { + fn get_zid(&self) -> ZenohIdProto { self.config.zid } diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index e765165a81..f4ddbd6ec4 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -16,7 +16,7 @@ use std::{any::Any, convert::TryFrom, sync::Arc, time::Duration}; use zenoh_core::ztimeout; use zenoh_link::{EndPoint, Link}; use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, + core::{WhatAmI, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; @@ -74,7 +74,7 @@ async fn run(endpoints: &[EndPoint]) { // Create the transport manager let 
sm = TransportManager::builder() .whatami(WhatAmI::Peer) - .zid(ZenohId::try_from([1]).unwrap()) + .zid(ZenohIdProto::try_from([1]).unwrap()) .build(Arc::new(SH)) .unwrap(); diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index 124dfeaad8..129f79d55e 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -29,7 +29,8 @@ mod tests { use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::{ @@ -144,8 +145,8 @@ mod tests { endpoint: &EndPoint, ) -> (TransportMulticastPeer, TransportMulticastPeer) { // Define peer01 and peer02 IDs - let peer01_id = ZenohId::try_from([1]).unwrap(); - let peer02_id = ZenohId::try_from([2]).unwrap(); + let peer01_id = ZenohIdProto::try_from([1]).unwrap(); + let peer02_id = ZenohIdProto::try_from([2]).unwrap(); // Create the peer01 transport manager let peer01_handler = Arc::new(SHPeer::default()); diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index e1d5bfc52c..0ffefb59b2 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -30,7 +30,8 @@ mod tests { use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::{ @@ -143,8 +144,8 @@ mod tests { endpoint: &EndPoint, ) -> (TransportMulticastPeer, TransportMulticastPeer) { // Define peer01 and peer02 IDs - let peer01_id = ZenohId::try_from([1]).unwrap(); - let peer02_id = ZenohId::try_from([2]).unwrap(); + let peer01_id = 
ZenohIdProto::try_from([1]).unwrap(); + let peer02_id = ZenohIdProto::try_from([2]).unwrap(); // Create the peer01 transport manager let peer01_handler = Arc::new(SHPeer::default()); diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index 4ace68a87b..121db5b5d6 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -16,7 +16,7 @@ use std::{any::Any, convert::TryFrom, iter::FromIterator, sync::Arc, time::Durat use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{EndPoint, ZenohId}, + core::{EndPoint, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; @@ -68,7 +68,7 @@ impl TransportPeerEventHandler for SCRouter { async fn run(endpoints: &[EndPoint]) { // Define client and router IDs - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); // Create the router transport manager println!(">>> Transport Whitelist [1a1]"); diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index b25fb77a63..87f2174598 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -16,7 +16,7 @@ use std::{any::Any, sync::Arc, time::Duration}; use zenoh_core::{zasyncwrite, ztimeout}; use zenoh_link::Link; use zenoh_protocol::{ - core::{EndPoint, WhatAmI, ZenohId}, + core::{EndPoint, WhatAmI, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; @@ -111,7 +111,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { }; // Create the transport transport manager for the client 01 - let client01_id = ZenohId::try_from([2]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); let n = BigUint::from_bytes_le(&[ 0x41, 0x74, 0xc6, 0x40, 0x18, 0x63, 0xbd, 0x59, 0xe6, 0x0d, 0xe9, 0x23, 0x3e, 0x95, 0xca, @@ -170,7 
+170,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { .unwrap(); // Create the transport transport manager for the client 02 - let client02_id = ZenohId::try_from([3]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); let n = BigUint::from_bytes_le(&[ 0xd1, 0x36, 0xcf, 0x94, 0xda, 0x04, 0x7e, 0x9f, 0x53, 0x39, 0xb8, 0x7b, 0x53, 0x3a, 0xe6, @@ -229,7 +229,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { .unwrap(); // Create the transport transport manager for the client 03 with the same key as client 02 - let client03_id = ZenohId::try_from([4]).unwrap(); + let client03_id = ZenohIdProto::try_from([4]).unwrap(); let mut auth = Auth::empty(); auth.set_pubkey(Some(AuthPubKey::new( client02_pub_key.clone().into(), @@ -249,7 +249,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { .unwrap(); // Create the router transport manager - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterAuthenticator::new()); let n = BigUint::from_bytes_le(&[ 0x31, 0xd1, 0xfc, 0x7e, 0x70, 0x5f, 0xd7, 0xe3, 0xcc, 0xa4, 0xca, 0xcb, 0x38, 0x84, 0x2f, @@ -414,11 +414,11 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { }; /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); let user01 = "user01".to_string(); let password01 = "password01".to_string(); - let client02_id = ZenohId::try_from([3]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); let user02 = "invalid".to_string(); let password02 = "invalid".to_string(); @@ -427,7 +427,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { let password03 = "password03".to_string(); /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = 
Arc::new(SHRouterAuthenticator::new()); // Create the router transport manager let mut auth_usrpwd_router = AuthUsrPwd::new(None); diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index e07b0baa83..7c2443c5d9 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -28,7 +28,8 @@ mod tests { use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::ext::{NodeIdType, QoSType}, @@ -168,8 +169,8 @@ mod tests { TransportUnicast, ) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_handler = Arc::new(SHRouter::default()); diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 9c9b58acde..183f8a7163 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -24,7 +24,7 @@ use tokio::sync::Barrier; use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::{ ext::{NodeIdType, QoSType}, @@ -108,8 +108,8 @@ impl TransportPeerEventHandler for MHPeer { async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec) { /* [Peers] */ - let peer_id01 = ZenohId::try_from([2]).unwrap(); - let peer_id02 = ZenohId::try_from([3]).unwrap(); + let peer_id01 = ZenohIdProto::try_from([2]).unwrap(); + let peer_id02 = 
ZenohIdProto::try_from([3]).unwrap(); // Create the peer01 transport manager let peer_sh01 = Arc::new(SHPeer::new()); diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 28b085ab39..fc54180c96 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -16,7 +16,8 @@ use std::{convert::TryFrom, sync::Arc, time::Duration}; use zenoh_core::ztimeout; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::{ @@ -37,8 +38,8 @@ const MSG_DEFRAG_BUF: usize = 128_000; async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_manager = TransportManager::builder() diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 9830820cf1..a2cb1e2d12 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -25,7 +25,7 @@ use std::{ use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::{ ext::{NodeIdType, QoSType}, @@ -148,7 +148,7 @@ impl TransportPeerEventHandler for SCClient { async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) { /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = 
ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterIntermittent); // Create the router transport manager @@ -168,9 +168,9 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - let client02_id = ZenohId::try_from([3]).unwrap(); - let client03_id = ZenohId::try_from([4]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); + let client03_id = ZenohIdProto::try_from([4]).unwrap(); // Create the transport transport manager for the first client let counter = Arc::new(AtomicUsize::new(0)); diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index d9337b790d..6fc0864fe2 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -17,7 +17,7 @@ mod tests { use zenoh_core::ztimeout; use zenoh_link::EndPoint; - use zenoh_protocol::core::{WhatAmI, ZenohId}; + use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; use zenoh_result::ZResult; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, DummyTransportPeerEventHandler, @@ -77,7 +77,7 @@ mod tests { async fn multilink_transport(endpoint: &EndPoint) { /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterOpenClose); // Create the router transport manager @@ -92,8 +92,8 @@ mod tests { .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - let client02_id = ZenohId::try_from([3]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); // Create the transport transport manager for the first client let unicast = TransportManager::config_unicast() diff --git 
a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 03af046a3d..8909d74402 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -15,7 +15,7 @@ use std::{convert::TryFrom, sync::Arc, time::Duration}; use zenoh_core::ztimeout; use zenoh_link::EndPoint; -use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; use zenoh_result::ZResult; use zenoh_transport::{ multicast::TransportMulticast, @@ -90,7 +90,7 @@ async fn openclose_transport( lowlatency_transport: bool, ) { /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterOpenClose); // Create the router transport manager @@ -110,8 +110,8 @@ async fn openclose_transport( .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - let client02_id = ZenohId::try_from([3]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); // Create the transport transport manager for the first client let unicast = make_transport_manager_builder( diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index c7e468b5c5..708a9fad3b 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -25,7 +25,7 @@ use std::{ use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::{ ext::{NodeIdType, QoSType}, @@ -200,8 +200,8 @@ async fn open_transport_unicast( TransportUnicast, ) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = 
ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_handler = Arc::new(SHRouter::new()); diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 5ec01f9290..8c06a17f6d 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -27,7 +27,7 @@ mod tests { use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::ext::{NodeIdType, QoSType}, NetworkBody, NetworkMessage, Push, @@ -153,9 +153,9 @@ mod tests { println!("Transport SHM [0a]: {endpoint:?}"); // Define client and router IDs - let peer_shm01 = ZenohId::try_from([1]).unwrap(); - let peer_shm02 = ZenohId::try_from([2]).unwrap(); - let peer_net01 = ZenohId::try_from([3]).unwrap(); + let peer_shm01 = ZenohIdProto::try_from([1]).unwrap(); + let peer_shm02 = ZenohIdProto::try_from([2]).unwrap(); + let peer_net01 = ZenohIdProto::try_from([3]).unwrap(); // create SHM provider let backend = PosixShmProviderBackend::builder() diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 8f9b23a6f1..4f529c3b74 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -26,7 +26,7 @@ mod tests { use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::ext::{NodeIdType, QoSType}, NetworkMessage, Push, @@ -47,12 +47,12 @@ mod tests { // Transport Handler for the router struct SHPeer { - zid: ZenohId, + zid: 
ZenohIdProto, count: Arc, } impl SHPeer { - fn new(zid: ZenohId) -> Self { + fn new(zid: ZenohIdProto) -> Self { Self { zid, count: Arc::new(AtomicUsize::new(0)), @@ -136,8 +136,8 @@ mod tests { async fn transport_simultaneous(endpoint01: Vec, endpoint02: Vec) { /* [Peers] */ - let peer_id01 = ZenohId::try_from([2]).unwrap(); - let peer_id02 = ZenohId::try_from([3]).unwrap(); + let peer_id01 = ZenohIdProto::try_from([2]).unwrap(); + let peer_id02 = ZenohIdProto::try_from([3]).unwrap(); // Create the peer01 transport manager let peer_sh01 = Arc::new(SHPeer::new(peer_id01)); diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index efe8842c12..5c62235371 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -19,7 +19,7 @@ use std::{ use zenoh_core::ztimeout; use zenoh_link::EndPoint; -use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; use zenoh_result::ZResult; use zenoh_transport::{ multicast::TransportMulticast, @@ -96,7 +96,7 @@ async fn time_transport( println!(">>> Universal transport"); } /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterOpenClose); // Create the router transport manager @@ -116,7 +116,7 @@ async fn time_transport( .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); // Create the transport transport manager for the first client let unicast = make_transport_manager_builder( diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 8fed09e8c2..b49b863991 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -26,7 +26,8 @@ use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ - 
Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::ext::{NodeIdType, QoSType}, @@ -354,8 +355,8 @@ async fn open_transport_unicast( TransportUnicast, ) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_handler = Arc::new(SHRouter::default()); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index bfabe92cd3..c5b2573335 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -24,7 +24,7 @@ use async_std::{ }; use flume::Receiver; use futures::join; -use zenoh::{config::ZenohId, key_expr::OwnedKeyExpr, time::Timestamp}; +use zenoh::{info::ZenohId, key_expr::OwnedKeyExpr, time::Timestamp}; use zenoh_backend_traits::config::ReplicaConfig; use super::{Digest, DigestConfig, LogEntry}; diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 38e1fc6967..8f0680897e 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -15,7 +15,7 @@ //! Tools to access information about the current zenoh [`Session`](crate::Session). 
use std::future::{IntoFuture, Ready}; -use zenoh_config::ZenohId; +use zenoh_config::wrappers::ZenohId; use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::core::WhatAmI; diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 92ab04145c..48a927cab3 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -23,6 +23,8 @@ use std::{ }; use futures::Sink; +#[zenoh_macros::unstable] +use zenoh_config::wrappers::EntityGlobalId; use zenoh_core::{zread, Resolvable, Resolve, Wait}; use zenoh_protocol::{ core::CongestionControl, @@ -34,7 +36,7 @@ use zenoh_result::{Error, ZResult}; use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, crate::api::sample::SourceInfo, - zenoh_protocol::core::EntityGlobalId, + zenoh_protocol::core::EntityGlobalIdProto, }; use super::{ @@ -157,10 +159,11 @@ impl<'a> Publisher<'a> { /// ``` #[zenoh_macros::unstable] pub fn id(&self) -> EntityGlobalId { - EntityGlobalId { + EntityGlobalIdProto { zid: self.session.zid().into(), eid: self.id, } + .into() } #[inline] diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 562069566b..0b761789b4 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -20,7 +20,7 @@ use std::{ use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::{CongestionControl, ZenohId}; +use zenoh_protocol::core::{CongestionControl, ZenohIdProto}; use zenoh_result::ZResult; #[zenoh_macros::unstable] @@ -84,7 +84,7 @@ impl Default for QueryConsolidation { #[derive(Clone, Debug)] pub struct Reply { pub(crate) result: Result, - pub(crate) replier_id: ZenohId, + pub(crate) replier_id: ZenohIdProto, } impl Reply { @@ -104,7 +104,7 @@ impl Reply { } /// Gets the id of the zenoh instance that answered this Reply. 
- pub fn replier_id(&self) -> ZenohId { + pub fn replier_id(&self) -> ZenohIdProto { self.replier_id } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e5643e0087..4c68b524b2 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -21,7 +21,7 @@ use std::{ use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ - core::{CongestionControl, EntityId, WireExpr, ZenohId}, + core::{CongestionControl, EntityId, WireExpr, ZenohIdProto}, network::{response, Mapping, RequestId, Response, ResponseFinal}, zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; @@ -29,7 +29,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { super::{query::ReplyKeyExpr, sample::SourceInfo}, - zenoh_protocol::core::EntityGlobalId, + zenoh_protocol::core::EntityGlobalIdProto, }; use super::{ @@ -54,7 +54,7 @@ pub(crate) struct QueryInner { pub(crate) key_expr: KeyExpr<'static>, pub(crate) parameters: Parameters<'static>, pub(crate) qid: RequestId, - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) primitives: Arc, } @@ -870,8 +870,8 @@ impl<'a, Handler> Queryable<'a, Handler> { /// # } /// ``` #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalId { - EntityGlobalId { + pub fn id(&self) -> EntityGlobalIdProto { + EntityGlobalIdProto { zid: self.queryable.session.zid().into(), eid: self.queryable.state.id, } diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 982e29631b..a65e42048c 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -18,7 +18,7 @@ use std::{convert::TryFrom, fmt}; #[cfg(feature = "unstable")] use serde::Serialize; use zenoh_protocol::{ - core::{CongestionControl, EntityGlobalId, Timestamp}, + core::{CongestionControl, EntityGlobalIdProto, Timestamp}, network::declare::ext::QoSType, }; @@ -52,7 +52,7 @@ pub(crate) struct DataInfo { pub kind: SampleKind, pub encoding: Option, pub timestamp: Option, - pub source_id: Option, + 
pub source_id: Option, pub source_sn: Option, pub qos: QoS, } @@ -137,7 +137,7 @@ impl DataInfoIntoSample for Option { #[derive(Debug, Clone)] pub struct SourceInfo { /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. - pub source_id: Option, + pub source_id: Option, /// The sequence number of the [`Sample`] from the source. pub source_sn: Option, } @@ -145,12 +145,12 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] fn source_info_stack_size() { - use zenoh_protocol::core::ZenohId; + use zenoh_protocol::core::ZenohIdProto; use crate::api::sample::{SourceInfo, SourceSn}; - assert_eq!(std::mem::size_of::(), 16); - assert_eq!(std::mem::size_of::>(), 17); + assert_eq!(std::mem::size_of::(), 16); + assert_eq!(std::mem::size_of::>(), 17); assert_eq!(std::mem::size_of::>(), 16); assert_eq!(std::mem::size_of::(), 17 + 16 + 7); } diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index e16f31da2e..59b3d0dfcb 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -20,6 +20,7 @@ use std::{ }; use tokio::net::UdpSocket; +use zenoh_config::wrappers::Hello; use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::ZResult; @@ -30,36 +31,6 @@ use crate::{ net::runtime::{orchestrator::Loop, Runtime}, }; -/// A zenoh Hello message. -pub struct Hello(zenoh_protocol::scouting::Hello); - -impl Hello { - /// Get the locators of this Hello message. - pub fn locators(&self) -> &[zenoh_protocol::core::Locator] { - &self.0.locators - } - - /// Get the zenoh id of this Hello message. - pub fn zid(&self) -> zenoh_protocol::core::ZenohId { - self.0.zid - } - - /// Get the whatami of this Hello message. 
- pub fn whatami(&self) -> zenoh_protocol::core::WhatAmI { - self.0.whatami - } -} - -impl fmt::Display for Hello { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Hello") - .field("zid", &self.zid()) - .field("whatami", &self.whatami()) - .field("locators", &self.locators()) - .finish() - } -} - /// A builder for initializing a [`Scout`]. /// /// # Examples @@ -354,7 +325,7 @@ fn _scout( let scout = Runtime::scout(&sockets, what, &addr, move |hello| { let callback = callback.clone(); async move { - callback(Hello(hello)); + callback(hello.into()); Loop::Continue } }); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5cffe1cded..4c613acfb0 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -28,7 +28,7 @@ use tracing::{error, trace, warn}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; -use zenoh_config::{unwrap_or_default, Config, Notifier, ZenohId}; +use zenoh_config::{unwrap_or_default, wrappers::ZenohId, Config, Notifier}; use zenoh_core::{zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, Wait}; #[cfg(feature = "unstable")] use zenoh_protocol::network::{declare::SubscriberId, ext}; @@ -2195,7 +2195,7 @@ impl Primitives for Session { }; let replier_id = match e.ext_sinfo { Some(info) => info.id.zid, - None => zenoh_protocol::core::ZenohId::rand(), + None => zenoh_protocol::core::ZenohIdProto::rand(), }; let new_reply = Reply { replier_id, @@ -2313,7 +2313,7 @@ impl Primitives for Session { let sample = info.into_sample(key_expr.into_owned(), payload, attachment); let new_reply = Reply { result: Ok(sample), - replier_id: zenoh_protocol::core::ZenohId::rand(), // TODO + replier_id: zenoh_protocol::core::ZenohIdProto::rand(), // TODO }; let callback = match query.reception_mode { @@ -2658,7 +2658,7 @@ impl crate::net::primitives::EPrimitives for Session { /// # #[tokio::main] /// # async fn main() { /// use std::str::FromStr; -/// use 
zenoh::{config::ZenohId, prelude::*}; +/// use zenoh::{info::ZenohId, prelude::*}; /// /// let mut config = zenoh::config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 4628f9e95d..f7d5268772 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -21,7 +21,7 @@ use std::{ use zenoh_core::{Resolvable, Wait}; #[cfg(feature = "unstable")] -use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::core::EntityGlobalIdProto; use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; use zenoh_result::ZResult; @@ -458,8 +458,8 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// # } /// ``` #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalId { - EntityGlobalId { + pub fn id(&self) -> EntityGlobalIdProto { + EntityGlobalIdProto { zid: self.subscriber.session.zid().into(), eid: self.subscriber.state.id, } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c8062667ca..375cfd4712 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -207,6 +207,9 @@ pub mod session { /// Tools to access information about the current zenoh [`Session`](crate::Session). 
pub mod info { + pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; + pub use zenoh_protocol::core::EntityId; + pub use crate::api::info::{ PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder, }; @@ -324,7 +327,9 @@ pub mod handlers { /// Scouting primitives pub mod scouting { - pub use crate::api::scouting::{scout, Hello, Scout, ScoutBuilder}; + pub use zenoh_config::wrappers::Hello; + + pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; } /// Liveliness primitives diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs index a66163728c..7ee150d8bb 100644 --- a/zenoh/src/net/codec/linkstate.rs +++ b/zenoh/src/net/codec/linkstate.rs @@ -20,7 +20,7 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ common::imsg, - core::{Locator, WhatAmI, ZenohId}, + core::{Locator, WhatAmI, ZenohIdProto}, }; use super::Zenoh080Routing; @@ -85,7 +85,7 @@ where let psid: u64 = codec.read(&mut *reader)?; let sn: u64 = codec.read(&mut *reader)?; let zid = if imsg::has_option(options, linkstate::PID) { - let zid: ZenohId = codec.read(&mut *reader)?; + let zid: ZenohIdProto = codec.read(&mut *reader)?; Some(zid) } else { None diff --git a/zenoh/src/net/protocol/linkstate.rs b/zenoh/src/net/protocol/linkstate.rs index ccb5612011..cd8d1a91bf 100644 --- a/zenoh/src/net/protocol/linkstate.rs +++ b/zenoh/src/net/protocol/linkstate.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; +use zenoh_protocol::core::{Locator, WhatAmI, ZenohIdProto}; pub const PID: u64 = 1; // 0x01 pub const WAI: u64 = 1 << 1; // 0x02 @@ -37,7 +37,7 @@ pub const LOC: u64 = 1 << 2; // 0x04 pub(crate) struct LinkState { pub(crate) psid: u64, pub(crate) sn: u64, - pub(crate) zid: Option, + pub(crate) zid: Option, pub(crate) whatami: Option, pub(crate) locators: Option>, pub(crate) links: Vec, @@ -56,7 +56,7 @@ impl LinkState { let psid: u64 = 
rng.gen(); let sn: u64 = rng.gen(); let zid = if rng.gen_bool(0.5) { - Some(ZenohId::default()) + Some(ZenohIdProto::default()) } else { None }; diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index b21253d55f..7c92d5f709 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -20,7 +20,7 @@ use std::{ use tokio_util::sync::CancellationToken; use zenoh_protocol::{ - core::{ExprId, WhatAmI, ZenohId}, + core::{ExprId, WhatAmI, ZenohIdProto}, network::{ declare::ext, interest::{InterestId, InterestMode, InterestOptions}, @@ -55,7 +55,7 @@ pub(crate) struct InterestState { pub struct FaceState { pub(crate) id: usize, - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) whatami: WhatAmI, #[cfg(feature = "stats")] pub(crate) stats: Option>, @@ -76,7 +76,7 @@ impl FaceState { #[allow(clippy::too_many_arguments)] // @TODO fix warning pub(crate) fn new( id: usize, - zid: ZenohId, + zid: ZenohIdProto, whatami: WhatAmI, #[cfg(feature = "stats")] stats: Option>, primitives: Arc, diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 2853cc5a9f..9221522c00 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -21,7 +21,7 @@ use std::{ use uhlc::HLC; use zenoh_config::{unwrap_or_default, Config}; use zenoh_protocol::{ - core::{ExprId, WhatAmI, ZenohId}, + core::{ExprId, WhatAmI, ZenohIdProto}, network::Mapping, }; use zenoh_result::ZResult; @@ -61,7 +61,7 @@ impl<'a> RoutingExpr<'a> { } pub struct Tables { - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) whatami: WhatAmI, pub(crate) face_counter: usize, #[allow(dead_code)] @@ -79,7 +79,7 @@ pub struct Tables { impl Tables { pub fn new( - zid: ZenohId, + zid: ZenohIdProto, whatami: WhatAmI, hlc: Option>, config: &Config, @@ -145,7 +145,7 @@ impl Tables { } #[inline] - pub(crate) fn 
get_face(&self, zid: &ZenohId) -> Option<&Arc> { + pub(crate) fn get_face(&self, zid: &ZenohIdProto) -> Option<&Arc> { self.faces.values().find(|face| face.zid == *zid) } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 658c394b9a..32e4cb30e9 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -27,7 +27,7 @@ use std::{ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, - core::ZenohId, + core::ZenohIdProto, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, interest::InterestId, @@ -463,9 +463,9 @@ impl HatBaseTrait for HatCode { } struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - peer_qabls: HashMap, + router_subs: HashSet, + peer_subs: HashSet, + peer_qabls: HashMap, } impl HatContext { @@ -504,7 +504,7 @@ impl HatFace { } } -fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .peers_net .as_ref() diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 5a1394568c..4ca3dcfb92 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -27,7 +27,7 @@ use zenoh_codec::WCodec; use zenoh_link::Locator; use zenoh_protocol::{ common::ZExtBody, - core::{WhatAmI, WhatAmIMatcher, ZenohId}, + core::{WhatAmI, WhatAmIMatcher, ZenohIdProto}, network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, }; use zenoh_transport::unicast::TransportUnicast; @@ -48,11 +48,11 @@ struct Details { #[derive(Clone)] pub(super) struct Node { - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: Option, pub(super) locators: Option>, pub(super) sn: u64, - pub(super) 
links: Vec, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -63,8 +63,8 @@ impl std::fmt::Debug for Node { pub(super) struct Link { pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, + zid: ZenohIdProto, + mappings: VecMap, local_mappings: VecMap, } @@ -80,12 +80,12 @@ impl Link { } #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohIdProto) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohIdProto> { self.mappings.get((*psid).try_into().unwrap()) } @@ -132,7 +132,7 @@ impl Network { #[allow(clippy::too_many_arguments)] pub(super) fn new( name: String, - zid: ZenohId, + zid: ZenohIdProto, runtime: Runtime, full_linkstate: bool, router_peers_failover_brokering: bool, @@ -177,7 +177,7 @@ impl Network { } #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + pub(super) fn get_idx(&self, zid: &ZenohIdProto) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) @@ -189,7 +189,7 @@ impl Network { } #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohIdProto) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } @@ -344,7 +344,11 @@ impl Network { self.graph.update_edge(idx1, idx2, weight); } - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + pub(super) fn link_states( + &mut self, + link_states: Vec, + src: ZenohIdProto, + ) -> Changes { tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let strong_runtime = self.runtime.upgrade().unwrap(); @@ -409,7 +413,7 @@ impl Network { let link_states = link_states .into_iter() .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links + let links: Vec = 
links .iter() .filter_map(|l| { if let Some(zid) = src_link.get_zid(l) { @@ -559,7 +563,7 @@ impl Network { } }, ) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); // Add/remove edges from graph let mut reintroduced_nodes = vec![]; @@ -611,7 +615,7 @@ impl Network { let link_states = link_states .into_iter() .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); if !self.autoconnect.is_empty() { // Connect discovered peers @@ -650,8 +654,8 @@ impl Network { #[allow(clippy::type_complexity)] // This is only used here if !link_states.is_empty() { let (new_idxs, updated_idxs): ( - Vec<(Vec, NodeIndex, bool)>, - Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, ) = link_states.into_iter().partition(|(_, _, new)| *new); let new_idxs = new_idxs .into_iter() @@ -815,7 +819,7 @@ impl Network { free_index } - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohIdProto) -> Vec<(NodeIndex, Node)> { tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 0bd9f62f98..b75cb26cc1 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -19,7 +19,7 @@ use std::{ use petgraph::graph::NodeIndex; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohIdProto}, network::{ declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, @@ -179,7 +179,7 @@ fn propagate_sourced_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: Option<&Arc>, - source: 
&ZenohId, + source: &ZenohIdProto, ) { let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { @@ -216,7 +216,7 @@ fn register_peer_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, ) { if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription @@ -240,7 +240,7 @@ fn declare_peer_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, ) { register_peer_subscription(tables, face, res, sub_info, peer); } @@ -407,7 +407,7 @@ fn propagate_forget_sourced_subscription( tables: &Tables, res: &Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { @@ -438,7 +438,7 @@ fn propagate_forget_sourced_subscription( } } -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { +fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -456,7 +456,7 @@ fn undeclare_peer_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { if res_hat!(res).peer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); @@ -468,7 +468,7 @@ fn forget_peer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { undeclare_peer_subscription(tables, Some(face), res, peer); } @@ -554,7 +554,7 @@ fn forget_client_subscription( } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto) { for mut res in hat!(tables) .peer_subs .iter() @@ -813,7 +813,7 @@ impl HatPubSubTrait for HatCode { tables: &Tables, net: &Network, source: NodeId, - subs: &HashSet, + subs: &HashSet, ) { if 
net.trees.len() > source as usize { for sub in subs { @@ -924,7 +924,7 @@ impl HatPubSubTrait for HatCode { tables: &Tables, net: &Network, source: usize, - subs: &HashSet, + subs: &HashSet, ) { if net.trees.len() > source { for sub in subs { diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index b75233409d..ea893c05b1 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -26,7 +26,7 @@ use zenoh_protocol::{ include::{Includer, DEFAULT_INCLUDER}, OwnedKeyExpr, }, - WhatAmI, WireExpr, ZenohId, + WhatAmI, WireExpr, ZenohIdProto, }, network::{ declare::{ @@ -208,7 +208,7 @@ fn propagate_sourced_queryable( res: &Arc, qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { @@ -245,7 +245,7 @@ fn register_peer_queryable( mut face: Option<&mut Arc>, res: &mut Arc, qabl_info: &QueryableInfoType, - peer: ZenohId, + peer: ZenohIdProto, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -270,7 +270,7 @@ fn declare_peer_queryable( face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfoType, - peer: ZenohId, + peer: ZenohIdProto, ) { let face = Some(face); register_peer_queryable(tables, face, res, qabl_info, peer); @@ -431,7 +431,7 @@ fn propagate_forget_sourced_queryable( tables: &mut Tables, res: &mut Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { @@ -462,7 +462,7 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { +fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { 
res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -480,7 +480,7 @@ fn undeclare_peer_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { if res_hat!(res).peer_qabls.contains_key(peer) { unregister_peer_queryable(tables, res, peer); @@ -492,7 +492,7 @@ fn forget_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { undeclare_peer_queryable(tables, Some(face), res, peer); } @@ -584,7 +584,7 @@ fn forget_client_queryable( } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto) { let mut qabls = vec![]; for res in hat!(tables).peer_qabls.iter() { for qabl in res_hat!(res).peer_qabls.keys() { @@ -641,7 +641,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 49eeaa1d38..b8aa28d5f4 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -22,7 +22,7 @@ use std::{any::Any, collections::HashMap, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ - core::{WireExpr, ZenohId}, + core::{WireExpr, ZenohIdProto}, network::{ declare::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, @@ -55,9 +55,9 @@ zconfigurable! 
{ #[derive(serde::Serialize)] pub(crate) struct Sources { - routers: Vec, - peers: Vec, - clients: Vec, + routers: Vec, + peers: Vec, + clients: Vec, } impl Sources { diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index bde0d5a12b..b3216c6b8c 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -23,7 +23,7 @@ use zenoh_codec::WCodec; use zenoh_link::Locator; use zenoh_protocol::{ common::ZExtBody, - core::{WhatAmI, WhatAmIMatcher, ZenohId}, + core::{WhatAmI, WhatAmIMatcher, ZenohIdProto}, network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, }; use zenoh_transport::unicast::TransportUnicast; @@ -43,11 +43,11 @@ struct Details { #[derive(Clone)] pub(super) struct Node { - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: Option, pub(super) locators: Option>, pub(super) sn: u64, - pub(super) links: Vec, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -58,8 +58,8 @@ impl std::fmt::Debug for Node { pub(super) struct Link { pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, + zid: ZenohIdProto, + mappings: VecMap, local_mappings: VecMap, } @@ -75,12 +75,12 @@ impl Link { } #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohIdProto) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohIdProto> { self.mappings.get((*psid).try_into().unwrap()) } @@ -106,7 +106,7 @@ pub(super) struct Network { impl Network { pub(super) fn new( name: String, - zid: ZenohId, + zid: ZenohIdProto, runtime: Runtime, router_peers_failover_brokering: bool, gossip: bool, @@ -144,14 +144,14 @@ impl Network { // } #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> 
Option { + pub(super) fn get_idx(&self, zid: &ZenohIdProto) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) } #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohIdProto) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } @@ -271,7 +271,7 @@ impl Network { })) } - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) { + pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohIdProto) { tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let strong_runtime = self.runtime.upgrade().unwrap(); @@ -333,7 +333,7 @@ impl Network { let link_states = link_states .into_iter() .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links + let links: Vec = links .iter() .filter_map(|l| { if let Some(zid) = src_link.get_zid(l) { @@ -551,7 +551,7 @@ impl Network { free_index } - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohIdProto) -> Vec<(NodeIndex, Node)> { tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 301b300498..46dfe6f058 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -28,7 +28,7 @@ use std::{ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, - core::ZenohId, + core::ZenohIdProto, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, interest::InterestId, @@ -121,7 +121,7 @@ struct HatTables { peer_qabls: HashSet>, routers_net: Option, peers_net: Option, - shared_nodes: Vec, + shared_nodes: Vec, routers_trees_task: Option, peers_trees_task: 
Option, router_peers_failover_brokering: bool, @@ -183,7 +183,7 @@ impl HatTables { } #[inline] - fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + fn get_router_links(&self, peer: ZenohIdProto) -> impl Iterator + '_ { self.peers_net .as_ref() .unwrap() @@ -201,14 +201,14 @@ impl HatTables { #[inline] fn elect_router<'a>( &'a self, - self_zid: &'a ZenohId, + self_zid: &'a ZenohIdProto, key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { + mut routers: impl Iterator, + ) -> &'a ZenohIdProto { match routers.next() { None => self_zid, Some(router) => { - let hash = |r: &ZenohId| { + let hash = |r: &ZenohIdProto| { let mut hasher = DefaultHasher::new(); for b in key_expr.as_bytes() { hasher.write_u8(*b); @@ -233,13 +233,13 @@ impl HatTables { } #[inline] - fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + fn failover_brokering_to(source_links: &[ZenohIdProto], dest: ZenohIdProto) -> bool { // if source_links is empty then gossip is probably disabled in source peer !source_links.is_empty() && !source_links.contains(&dest) } #[inline] - fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + fn failover_brokering(&self, peer1: ZenohIdProto, peer2: ZenohIdProto) -> bool { self.router_peers_failover_brokering && self .peers_net @@ -762,10 +762,10 @@ impl HatBaseTrait for HatCode { } struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, + router_subs: HashSet, + peer_subs: HashSet, + router_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -805,7 +805,7 @@ impl HatFace { } } -fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { +fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .routers_net .as_ref() @@ -832,7 +832,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option< } } -fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { +fn 
get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .peers_net .as_ref() diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index cd38d140e1..151957d19a 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -27,7 +27,7 @@ use zenoh_codec::WCodec; use zenoh_link::Locator; use zenoh_protocol::{ common::ZExtBody, - core::{WhatAmI, WhatAmIMatcher, ZenohId}, + core::{WhatAmI, WhatAmIMatcher, ZenohIdProto}, network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, }; use zenoh_transport::unicast::TransportUnicast; @@ -48,11 +48,11 @@ struct Details { #[derive(Clone)] pub(super) struct Node { - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: Option, pub(super) locators: Option>, pub(super) sn: u64, - pub(super) links: Vec, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -63,8 +63,8 @@ impl std::fmt::Debug for Node { pub(super) struct Link { pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, + zid: ZenohIdProto, + mappings: VecMap, local_mappings: VecMap, } @@ -80,12 +80,12 @@ impl Link { } #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohIdProto) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohIdProto> { self.mappings.get((*psid).try_into().unwrap()) } @@ -132,7 +132,7 @@ impl Network { #[allow(clippy::too_many_arguments)] pub(super) fn new( name: String, - zid: ZenohId, + zid: ZenohIdProto, runtime: Runtime, full_linkstate: bool, router_peers_failover_brokering: bool, @@ -177,12 +177,12 @@ impl Network { } #[inline] - pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { + pub(super) fn get_node(&self, zid: 
&ZenohIdProto) -> Option<&Node> { self.graph.node_weights().find(|weight| weight.zid == *zid) } #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + pub(super) fn get_idx(&self, zid: &ZenohIdProto) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) @@ -194,7 +194,7 @@ impl Network { } #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohIdProto) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } @@ -349,7 +349,11 @@ impl Network { self.graph.update_edge(idx1, idx2, weight); } - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + pub(super) fn link_states( + &mut self, + link_states: Vec, + src: ZenohIdProto, + ) -> Changes { tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let graph = &self.graph; @@ -413,7 +417,7 @@ impl Network { let link_states = link_states .into_iter() .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links + let links: Vec = links .iter() .filter_map(|l| { if let Some(zid) = src_link.get_zid(l) { @@ -563,7 +567,7 @@ impl Network { } }, ) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); // Add/remove edges from graph let mut reintroduced_nodes = vec![]; @@ -615,7 +619,7 @@ impl Network { let link_states = link_states .into_iter() .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); if !self.autoconnect.is_empty() { // Connect discovered peers @@ -654,8 +658,8 @@ impl Network { #[allow(clippy::type_complexity)] // This is only used here if !link_states.is_empty() { let (new_idxs, updated_idxs): ( - Vec<(Vec, NodeIndex, bool)>, - Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, ) = link_states.into_iter().partition(|(_, _, new)| *new); let new_idxs = new_idxs .into_iter() @@ -819,7 +823,7 
@@ impl Network { free_index } - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohIdProto) -> Vec<(NodeIndex, Node)> { tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); @@ -999,7 +1003,7 @@ impl Network { } #[inline] - pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + pub(super) fn get_links(&self, node: ZenohIdProto) -> &[ZenohIdProto] { self.get_node(&node) .map(|node| &node.links[..]) .unwrap_or_default() @@ -1007,7 +1011,7 @@ impl Network { } #[inline] -pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { +pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { net1.graph .node_references() .filter_map(|(_, node1)| { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 233e0b8cdf..dba4d58e85 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -19,7 +19,7 @@ use std::{ use petgraph::graph::NodeIndex; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohIdProto}, network::{ declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, @@ -176,7 +176,7 @@ fn propagate_sourced_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -214,7 +214,7 @@ fn register_router_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - router: ZenohId, + router: ZenohIdProto, ) { if !res_hat!(res).router_subs.contains(&router) { // Register router subscription @@ -240,7 +240,7 @@ fn declare_router_subscription( face: &mut Arc, res: &mut Arc, sub_info: 
&SubscriberInfo, - router: ZenohId, + router: ZenohIdProto, ) { register_router_subscription(tables, face, res, sub_info, router); } @@ -250,7 +250,7 @@ fn register_peer_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, ) { if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription @@ -269,7 +269,7 @@ fn declare_peer_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, ) { register_peer_subscription(tables, face, res, sub_info, peer); let propa_sub_info = *sub_info; @@ -491,7 +491,7 @@ fn propagate_forget_sourced_subscription( tables: &Tables, res: &Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -523,7 +523,11 @@ fn propagate_forget_sourced_subscription( } } -fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { +fn unregister_router_subscription( + tables: &mut Tables, + res: &mut Arc, + router: &ZenohIdProto, +) { res_hat_mut!(res).router_subs.retain(|sub| sub != router); if res_hat!(res).router_subs.is_empty() { @@ -544,7 +548,7 @@ fn undeclare_router_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, ) { if res_hat!(res).router_subs.contains(router) { unregister_router_subscription(tables, res, router); @@ -556,12 +560,12 @@ fn forget_router_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, ) { undeclare_router_subscription(tables, Some(face), res, router); } -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { +fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -575,7 +579,7 @@ fn 
undeclare_peer_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { if res_hat!(res).peer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); @@ -587,7 +591,7 @@ fn forget_peer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { undeclare_peer_subscription(tables, Some(face), res, peer); let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); @@ -684,7 +688,7 @@ fn forget_client_subscription( } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_type: WhatAmI) { match net_type { WhatAmI::Router => { for mut res in hat!(tables) @@ -771,7 +775,11 @@ pub(super) fn pubsub_tree_change( update_data_routes_from(tables, &mut tables.root_res.clone()); } -pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { +pub(super) fn pubsub_linkstate_change( + tables: &mut Tables, + zid: &ZenohIdProto, + links: &[ZenohIdProto], +) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { for res in face_hat!(src_face).remote_subs.values() { @@ -1122,7 +1130,7 @@ impl HatPubSubTrait for HatCode { tables: &Tables, net: &Network, source: NodeId, - subs: &HashSet, + subs: &HashSet, ) { if net.trees.len() > source as usize { for sub in subs { @@ -1255,7 +1263,7 @@ impl HatPubSubTrait for HatCode { tables: &Tables, net: &Network, source: usize, - subs: &HashSet, + subs: &HashSet, ) { if net.trees.len() > source { for sub in subs { diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 3ab0ac507d..217d74955f 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -26,7 +26,7 @@ use 
zenoh_protocol::{ include::{Includer, DEFAULT_INCLUDER}, OwnedKeyExpr, }, - WhatAmI, WireExpr, ZenohId, + WhatAmI, WireExpr, ZenohIdProto, }, network::{ declare::{ @@ -286,7 +286,7 @@ fn propagate_sourced_queryable( res: &Arc, qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -324,7 +324,7 @@ fn register_router_queryable( mut face: Option<&mut Arc>, res: &mut Arc, qabl_info: &QueryableInfoType, - router: ZenohId, + router: ZenohIdProto, ) { let current_info = res_hat!(res).router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -362,7 +362,7 @@ fn declare_router_queryable( face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfoType, - router: ZenohId, + router: ZenohIdProto, ) { register_router_queryable(tables, Some(face), res, qabl_info, router); } @@ -372,7 +372,7 @@ fn register_peer_queryable( face: Option<&mut Arc>, res: &mut Arc, qabl_info: &QueryableInfoType, - peer: ZenohId, + peer: ZenohIdProto, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -392,7 +392,7 @@ fn declare_peer_queryable( face: &mut Arc, res: &mut Arc, qabl_info: &QueryableInfoType, - peer: ZenohId, + peer: ZenohIdProto, ) { let mut face = Some(face); register_peer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); @@ -608,7 +608,7 @@ fn propagate_forget_sourced_queryable( tables: &mut Tables, res: &mut Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -640,7 +640,11 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { +fn unregister_router_queryable( + tables: &mut Tables, + res: &mut Arc, + router: &ZenohIdProto, +) { 
res_hat_mut!(res).router_qabls.remove(router); if res_hat!(res).router_qabls.is_empty() { @@ -661,7 +665,7 @@ fn undeclare_router_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, ) { if res_hat!(res).router_qabls.contains_key(router) { unregister_router_queryable(tables, res, router); @@ -673,12 +677,12 @@ fn forget_router_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, ) { undeclare_router_queryable(tables, Some(face), res, router); } -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { +fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -692,7 +696,7 @@ fn undeclare_peer_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { if res_hat!(res).peer_qabls.contains_key(peer) { unregister_peer_queryable(tables, res, peer); @@ -704,7 +708,7 @@ fn forget_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { undeclare_peer_queryable(tables, Some(face), res, peer); @@ -810,7 +814,7 @@ fn forget_client_queryable( } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_type: WhatAmI) { match net_type { WhatAmI::Router => { let mut qabls = vec![]; @@ -857,7 +861,11 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } } -pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { +pub(super) fn queries_linkstate_change( + tables: &mut Tables, + zid: &ZenohIdProto, + links: &[ZenohIdProto], +) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && 
src_face.whatami == WhatAmI::Peer { for res in face_hat!(src_face).remote_qabls.values() { @@ -990,7 +998,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 6467edd13e..b237ac1b78 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -22,7 +22,7 @@ use std::{any::Any, sync::Arc}; use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject}; use zenoh_protocol::{ - core::ZenohId, + core::ZenohIdProto, network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, zenoh::{PushBody, RequestBody}, }; @@ -45,12 +45,12 @@ pub struct Interface { struct EgressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, - zid: ZenohId, + zid: ZenohIdProto, } struct IngressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, - zid: ZenohId, + zid: ZenohIdProto, } pub(crate) fn acl_interceptor_factories( @@ -284,7 +284,7 @@ impl InterceptorTrait for EgressAclEnforcer { pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; fn interface_list(&self) -> Vec; - fn zid(&self) -> ZenohId; + fn zid(&self) -> ZenohIdProto; fn flow(&self) -> InterceptorFlow; fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); @@ -342,7 +342,7 @@ impl AclActionMethods for EgressAclEnforcer { self.interface_list.clone() } - fn zid(&self) -> ZenohId { + fn zid(&self) -> ZenohIdProto { self.zid } fn flow(&self) -> InterceptorFlow { @@ -359,7 +359,7 @@ impl AclActionMethods for IngressAclEnforcer { self.interface_list.clone() } - fn zid(&self) -> ZenohId { + fn zid(&self) -> ZenohIdProto { self.zid } fn flow(&self) -> InterceptorFlow { diff --git a/zenoh/src/net/routing/router.rs 
b/zenoh/src/net/routing/router.rs index 630253e1c6..d11ab24b5b 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -18,7 +18,7 @@ use std::{ use uhlc::HLC; use zenoh_config::Config; -use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; // use zenoh_collections::Timer; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast, TransportPeer}; @@ -45,7 +45,7 @@ pub struct Router { impl Router { pub fn new( - zid: ZenohId, + zid: ZenohIdProto, whatami: WhatAmI, hlc: Option>, config: &Config, @@ -176,7 +176,7 @@ impl Router { let mux = Arc::new(McastMux::new(transport.clone(), interceptor)); let face = FaceState::new( fid, - ZenohId::from_str("1").unwrap(), + ZenohIdProto::from_str("1").unwrap(), WhatAmI::Peer, #[cfg(feature = "stats")] None, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index da2ab9b628..1f78bb71e9 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -19,7 +19,7 @@ use std::{ use serde_json::json; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; -use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI, ZenohId}; +use zenoh_config::{unwrap_or_default, wrappers::ZenohId, ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_core::Wait; #[cfg(feature = "plugins")] use zenoh_plugin_trait::{PluginControl, PluginStatus}; diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 81a904a3da..a89ec94d61 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -36,7 +36,7 @@ use futures::{stream::StreamExt, Future}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; -use zenoh_config::ZenohId; +use zenoh_config::wrappers::ZenohId; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use 
zenoh_protocol::{ diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 138ad2e631..73a0355199 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -32,8 +32,8 @@ use zenoh_config::{ }; use zenoh_link::{Locator, LocatorInspector}; use zenoh_protocol::{ - core::{whatami::WhatAmIMatcher, EndPoint, WhatAmI, ZenohId}, - scouting::{Hello, Scout, ScoutingBody, ScoutingMessage}, + core::{whatami::WhatAmIMatcher, EndPoint, WhatAmI, ZenohIdProto}, + scouting::{HelloProto, Scout, ScoutingBody, ScoutingMessage}, }; use zenoh_result::{bail, zerror, ZResult}; @@ -53,7 +53,7 @@ pub enum Loop { #[derive(Default, Debug)] pub(crate) struct PeerConnector { - zid: Option, + zid: Option, terminated: bool, } @@ -74,7 +74,7 @@ impl StartConditions { peer_connectors.len() - 1 } - pub(crate) async fn add_peer_connector_zid(&self, zid: ZenohId) { + pub(crate) async fn add_peer_connector_zid(&self, zid: ZenohIdProto) { let mut peer_connectors = self.peer_connectors.lock().await; if !peer_connectors.iter().any(|pc| pc.zid == Some(zid)) { peer_connectors.push(PeerConnector { @@ -84,7 +84,7 @@ impl StartConditions { } } - pub(crate) async fn set_peer_connector_zid(&self, idx: usize, zid: ZenohId) { + pub(crate) async fn set_peer_connector_zid(&self, idx: usize, zid: ZenohIdProto) { let mut peer_connectors = self.peer_connectors.lock().await; if let Some(peer_connector) = peer_connectors.get_mut(idx) { peer_connector.zid = Some(zid); @@ -101,7 +101,7 @@ impl StartConditions { } } - pub(crate) async fn terminate_peer_connector_zid(&self, zid: ZenohId) { + pub(crate) async fn terminate_peer_connector_zid(&self, zid: ZenohIdProto) { let mut peer_connectors = self.peer_connectors.lock().await; if let Some(peer_connector) = peer_connectors.iter_mut().find(|pc| pc.zid == Some(zid)) { peer_connector.terminated = true; @@ -777,7 +777,7 @@ impl Runtime { } } - async fn peer_connector_retry(&self, peer: EndPoint) -> 
ZResult { + async fn peer_connector_retry(&self, peer: EndPoint) -> ZResult { let retry_config = self.get_connect_retry_config(&peer); let mut period = retry_config.period(); let cancellation_token = self.get_cancellation_token(); @@ -829,7 +829,7 @@ impl Runtime { mcast_addr: &SocketAddr, f: F, ) where - F: Fn(Hello) -> Fut + std::marker::Send + std::marker::Sync + Clone, + F: Fn(HelloProto) -> Fut + std::marker::Send + std::marker::Sync + Clone, Fut: Future + std::marker::Send, Self: Sized, { @@ -920,7 +920,7 @@ impl Runtime { } #[must_use] - async fn connect(&self, zid: &ZenohId, locators: &[Locator]) -> bool { + async fn connect(&self, zid: &ZenohIdProto, locators: &[Locator]) -> bool { const ERR: &str = "Unable to connect to newly scouted peer "; let inspector = LocatorInspector::default(); @@ -981,7 +981,7 @@ impl Runtime { false } - pub async fn connect_peer(&self, zid: &ZenohId, locators: &[Locator]) { + pub async fn connect_peer(&self, zid: &ZenohIdProto, locators: &[Locator]) { let manager = self.manager(); if zid != &manager.zid() { let has_unicast = manager.get_transport_unicast(zid).await.is_some(); @@ -1104,7 +1104,7 @@ impl Runtime { let codec = Zenoh080::new(); let zid = self.manager().zid(); - let hello: ScoutingMessage = Hello { + let hello: ScoutingMessage = HelloProto { version: zenoh_protocol::VERSION, whatami: self.whatami(), zid, diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 5e8ec8a1b6..de0b691b04 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -22,7 +22,8 @@ use zenoh_config::Config; use zenoh_core::zlock; use zenoh_protocol::{ core::{ - key_expr::keyexpr, Encoding, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, + key_expr::keyexpr, Encoding, ExprId, Reliability, WhatAmI, WireExpr, ZenohIdProto, + EMPTY_EXPR_ID, }, network::{ declare::subscriber::ext::SubscriberInfo, ext, Declare, DeclareBody, DeclareKeyExpr, @@ -43,7 +44,7 @@ use crate::net::{ fn 
base_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -139,7 +140,7 @@ fn match_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -179,7 +180,7 @@ fn match_test() { fn multisub_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -248,7 +249,7 @@ fn multisub_test() { async fn clean_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -568,7 +569,7 @@ impl EPrimitives for ClientPrimitives { fn client_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, From 825bfe4d6256b2b5cc14d12b64be569e4efaffd2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 16:18:49 +0200 Subject: [PATCH 421/598] more renamings --- .../src/core/{parameters => }/parameters.rs | 6 ++++- .../zenoh-protocol/src/core/parameters/mod.rs | 22 ------------------- .../src/core/parameters/properties.rs | 3 ++- 3 files changed, 7 insertions(+), 24 deletions(-) rename commons/zenoh-protocol/src/core/{parameters => }/parameters.rs (95%) delete mode 100644 commons/zenoh-protocol/src/core/parameters/mod.rs diff --git a/commons/zenoh-protocol/src/core/parameters/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs similarity index 95% rename from commons/zenoh-protocol/src/core/parameters/parameters.rs rename to 
commons/zenoh-protocol/src/core/parameters.rs index 4f1c1f5eb4..4ecec4e47a 100644 --- a/commons/zenoh-protocol/src/core/parameters/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -12,7 +12,8 @@ // ZettaScale Zenoh Team, // -/// Module provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. +/// Module provides a set of utility functions which allows to manipulate `&str` which follows the format `a=b;c=d|e;f=g`. +/// and structure `Parameters` which provides `HashMap<&str, &str>`-like view over a string of such format. /// /// `;` is the separator between the key-value `(&str, &str)` elements. /// @@ -20,6 +21,9 @@ /// /// `|` is the separator between multiple elements of the values. +mod properties; +pub use properties::Parameters; + pub(super) const LIST_SEPARATOR: char = ';'; pub(super) const FIELD_SEPARATOR: char = '='; pub(super) const VALUE_SEPARATOR: char = '|'; diff --git a/commons/zenoh-protocol/src/core/parameters/mod.rs b/commons/zenoh-protocol/src/core/parameters/mod.rs deleted file mode 100644 index eda078f39d..0000000000 --- a/commons/zenoh-protocol/src/core/parameters/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0.
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -// This module contains utility functions for parsing and manipulating &str as key-value pairs -#[allow(clippy::module_inception)] -mod parameters; -pub use parameters::*; - -// This module contains the `Parameters` struct which provides a HashMap-like interface for string with the key-value pairs -mod properties; -pub use properties::Parameters; diff --git a/commons/zenoh-protocol/src/core/parameters/properties.rs b/commons/zenoh-protocol/src/core/parameters/properties.rs index 95a2907b2d..809e840aff 100644 --- a/commons/zenoh-protocol/src/core/parameters/properties.rs +++ b/commons/zenoh-protocol/src/core/parameters/properties.rs @@ -18,7 +18,8 @@ use alloc::{ use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; -use super::{parameters as parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +use super::{FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +use crate::core::parameters; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties From 0bcbc558bed29e9fcf4d91bd019d08d86e1ec944 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 16:24:09 +0200 Subject: [PATCH 422/598] cargo fmt --- commons/zenoh-protocol/src/core/parameters.rs | 5 +---- commons/zenoh-protocol/src/core/parameters/properties.rs | 1 + io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 4ecec4e47a..101bf9d925 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -20,7 +20,6 @@ /// `=` is the separator between the `&str`-key and `&str`-value /// /// `|` is the separator between multiple elements of the values. 
- mod properties; pub use properties::Parameters; @@ -91,9 +90,7 @@ where /// Get the a `&str`-value for a `&str`-key according to the parameters format. pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - iter(s) - .find(|(key, _)| *key == k) - .map(|(_, value)| value) + iter(s).find(|(key, _)| *key == k).map(|(_, value)| value) } /// Get the a `&str`-value iterator for a `&str`-key according to the parameters format. diff --git a/commons/zenoh-protocol/src/core/parameters/properties.rs b/commons/zenoh-protocol/src/core/parameters/properties.rs index 809e840aff..d9f8634e9f 100644 --- a/commons/zenoh-protocol/src/core/parameters/properties.rs +++ b/commons/zenoh-protocol/src/core/parameters/properties.rs @@ -18,6 +18,7 @@ use alloc::{ use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; + use super::{FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use crate::core::parameters; diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index 9266534f2b..ff1c2f983b 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -24,7 +24,7 @@ pub use unicast::*; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{Locator, parameters}; +use zenoh_protocol::core::{parameters, Locator}; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; From 296c32562046b5784b5442ff21cfbc29eac2ef99 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 16:29:57 +0200 Subject: [PATCH 423/598] doc fix --- commons/zenoh-protocol/src/core/parameters/properties.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/commons/zenoh-protocol/src/core/parameters/properties.rs b/commons/zenoh-protocol/src/core/parameters/properties.rs index d9f8634e9f..4856edf7af 100644 --- 
a/commons/zenoh-protocol/src/core/parameters/properties.rs +++ b/commons/zenoh-protocol/src/core/parameters/properties.rs @@ -28,10 +28,10 @@ use crate::core::parameters; /// /// Example: /// ``` -/// use zenoh_protocol::core::Properties; +/// use zenoh_protocol::core::Parameters; /// /// let a = "a=1;b=2;c=3|4|5;d=6"; -/// let p = Properties::from(a); +/// let p = Parameters::from(a); /// /// // Retrieve values /// assert!(!p.is_empty()); @@ -50,7 +50,7 @@ use crate::core::parameters; /// assert!(iter.next().is_none()); /// /// // Create properties from iterators -/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); +/// let pi = Parameters::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); /// assert_eq!(p, pi); /// ``` #[derive(Clone, PartialEq, Eq, Hash, Default)] From 906215bde5f5e1bdea4ae26b2d78620c508cf107 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 16:35:25 +0200 Subject: [PATCH 424/598] after merge updates --- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 24c4da8e9c..df6f568220 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -20,7 +20,7 @@ use std::{ use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::{CongestionControl, ZenohIdProto}; +use zenoh_protocol::core::{CongestionControl, Parameters, ZenohIdProto}; use zenoh_result::ZResult; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index d0e9f438ff..b727e89d62 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -21,7 +21,7 @@ use std::{ use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ - core::{CongestionControl, EntityId, WireExpr, ZenohIdProto}, + core::{CongestionControl, EntityId, Parameters, WireExpr, ZenohIdProto}, 
network::{response, Mapping, RequestId, Response, ResponseFinal}, zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; From 7ff7e81170ca5dcd92dd5be6e0c3ad702dfe24e9 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 16:40:22 +0200 Subject: [PATCH 425/598] no import of parameters::* into root of zenoh-protocol --- commons/zenoh-protocol/src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 7da8e5b901..e9bc700318 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -54,7 +54,7 @@ pub mod resolution; pub use resolution::*; pub mod parameters; -pub use parameters::*; +pub use parameters::Parameters; /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] From 1bbbc9ae26d42d43ff383442b8d95469cd2c27b7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 17:00:03 +0200 Subject: [PATCH 426/598] clippy fixes --- examples/examples/z_get_shm.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index fd902bfe65..83d02568b2 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -15,9 +15,9 @@ use std::time::Duration; use clap::Parser; use zenoh::{ + key_expr::KeyExpr, prelude::*, query::QueryTarget, - selector::KeyExpr, shm::{ zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 3183539155..c57a7b9340 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -577,7 +577,6 @@ fn local_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { let stats = query - .selector() .parameters() .iter() 
.any(|(k, v)| k == "_stats" && v != "false"); @@ -611,7 +610,6 @@ fn local_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { let stats = query - .selector() .parameters() .iter() .any(|(k, v)| k == "_stats" && v != "false"); From 115e894d15577936fdf6b3cf2d6fdf5c22269d05 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 12 Jun 2024 17:24:06 +0200 Subject: [PATCH 427/598] Import zenoh with unstable features (#1128) --- ci/valgrind-check/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/valgrind-check/Cargo.toml b/ci/valgrind-check/Cargo.toml index 067e5e53ee..e334c74141 100644 --- a/ci/valgrind-check/Cargo.toml +++ b/ci/valgrind-check/Cargo.toml @@ -25,7 +25,7 @@ description = "Internal crate for zenoh." tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } futures = "0.3.25" -zenoh = { path = "../../zenoh/" } +zenoh = { path = "../../zenoh/", features = ["unstable"] } zenoh-runtime = { path = "../../commons/zenoh-runtime/" } zenoh-util = { path = "../../commons/zenoh-util/", features = ["test"] } From 0e5ca0d6ce4af7ad13441f3c44d447fa318c8b40 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 17:27:31 +0200 Subject: [PATCH 428/598] comment updated --- zenoh/src/api/selector.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 9ff5e97a9e..b90b4ed0dd 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -121,9 +121,13 @@ pub trait PredefinedParameters { const TIME_RANGE_KEY: &'static str = "_time"; /// Sets the time range targeted by the selector parameters. fn set_time_range>>(&mut self, time_range: T); - /// Sets parameter allowing to querier to reply to this request even - /// it the requested key expression does not match the reply key expression. 
- /// TODO: add example + /// Sets the parameter allowing to receive replies from queryables not matching + /// the requested key expression. This may happen in this scenario: + /// - we are requesting keyexpr `a/b`. + /// - queryable is declared to handle `a/*` requests and contains data for `a/b` and `a/c`. + /// - queryable receives our request and sends two replies with data for `a/b` and `a/c` + /// + /// Normally only `a/b` reply would be accepted, but with `_anyke` parameter set, both replies are accepted. fn set_reply_key_expr_any(&mut self); /// Extracts the standardized `_time` argument from the selector parameters /// Returns `None` if the `_time` argument is not present or `Some` with the result of parsing the `_time` argument From 73961dd87b66f16160526385d614b53e24422fec Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Wed, 12 Jun 2024 17:27:43 +0200 Subject: [PATCH 429/598] feat: remove `Value` from the public API, make it internal for plugins (#1119) * feat: remove `Value` from the public API, make it internal for plugins * fix: remove `ReplyError::payload_mut` * fix: fix tests * fix: fix examples * fix: fix examples * fix: fix doctests --- examples/examples/z_get.rs | 12 +-- examples/examples/z_get_shm.rs | 13 ++-- examples/examples/z_pub.rs | 10 +-- examples/examples/z_pub_shm.rs | 12 +-- examples/examples/z_put.rs | 12 +-- examples/examples/z_put_float.rs | 12 +-- examples/examples/z_queryable.rs | 21 +++--- examples/examples/z_queryable_shm.rs | 18 ++--- plugins/zenoh-backend-example/src/lib.rs | 2 +- plugins/zenoh-backend-traits/Cargo.toml | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 4 +- plugins/zenoh-plugin-rest/src/lib.rs | 8 +- .../src/memory_backend/mod.rs | 2 +- .../src/replica/align_queryable.rs | 4 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 5 +- zenoh/src/api/builders/publisher.rs | 25 +------ zenoh/src/api/builders/sample.rs | 38 +++------- zenoh/src/api/query.rs | 73 +++++++++++++------
zenoh/src/api/queryable.rs | 50 +------------ zenoh/src/api/session.rs | 4 +- zenoh/src/lib.rs | 13 ++-- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 2 +- zenoh/tests/attachments.rs | 9 +-- zenoh/tests/handler.rs | 7 +- 26 files changed, 146 insertions(+), 216 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index d4fc416f9c..2069e20b31 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::{prelude::*, query::QueryTarget, selector::Selector, Config}; +use zenoh::{query::QueryTarget, selector::Selector, Config}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -22,7 +22,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (config, selector, value, target, timeout) = parse_args(); + let (config, selector, payload, target, timeout) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); @@ -34,7 +34,7 @@ async fn main() { // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. // .with(zenoh::handlers::RingChannel::default()) - .value(value) + .payload(payload.unwrap_or_default()) .target(target) .timeout(timeout) .await @@ -77,8 +77,8 @@ struct Args { /// The selection of resources to query selector: Selector<'static>, #[arg(short, long)] - /// An optional value to put in the query. - value: Option, + /// An optional payload to put in the query. + payload: Option, #[arg(short, long, default_value = "BEST_MATCHING")] /// The target queryables of the query. 
target: Qt, @@ -100,7 +100,7 @@ fn parse_args() -> ( ( args.common.into(), args.selector, - args.value, + args.payload, match args.target { Qt::BestMatching => QueryTarget::BestMatching, Qt::All => QueryTarget::All, diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 17f4e40e5b..71a3e3aa65 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -15,7 +15,6 @@ use std::time::Duration; use clap::Parser; use zenoh::{ - prelude::*, query::QueryTarget, selector::Selector, shm::{ @@ -33,7 +32,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, selector, mut value, target, timeout) = parse_args(); + let (mut config, selector, mut payload, target, timeout) = parse_args(); // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the @@ -67,7 +66,7 @@ async fn main() { .await .unwrap(); - let content = value + let content = payload .take() .unwrap_or_else(|| "Get from SHM Rust!".to_string()); sbuf[0..content.len()].copy_from_slice(content.as_bytes()); @@ -75,7 +74,7 @@ async fn main() { println!("Sending Query '{selector}'..."); let replies = session .get(&selector) - .value(sbuf) + .payload(sbuf) .target(target) .timeout(timeout) .await @@ -114,8 +113,8 @@ struct Args { #[arg(short, long, default_value = "demo/example/**")] /// The selection of resources to query selector: Selector<'static>, - /// The value to publish. - value: Option, + /// The payload to publish. + payload: Option, #[arg(short, long, default_value = "BEST_MATCHING")] /// The target queryables of the query. 
target: Qt, @@ -137,7 +136,7 @@ fn parse_args() -> ( ( args.common.into(), args.selector, - args.value, + args.payload, match args.target { Qt::BestMatching => QueryTarget::BestMatching, Qt::All => QueryTarget::All, diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 74a9c2898e..2130832fb4 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -22,7 +22,7 @@ async fn main() { // Initiate logging zenoh::try_init_log_from_env(); - let (config, key_expr, value, attachment) = parse_args(); + let (config, key_expr, payload, attachment) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); @@ -33,7 +33,7 @@ async fn main() { println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; - let buf = format!("[{idx:4}] {value}"); + let buf = format!("[{idx:4}] {payload}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); publisher.put(buf).attachment(&attachment).await.unwrap(); } @@ -45,8 +45,8 @@ struct Args { /// The key expression to write to. key: KeyExpr<'static>, #[arg(short, long, default_value = "Pub from Rust!")] - /// The value to write. - value: String, + /// The payload to write. + payload: String, #[arg(short, long)] /// The attachments to add to each put. 
/// @@ -58,5 +58,5 @@ struct Args { fn parse_args() -> (Config, KeyExpr<'static>, String, Option) { let args = Args::parse(); - (args.common.into(), args.key, args.value, args.attach) + (args.common.into(), args.key, args.payload, args.attach) } diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index dfb6fb44a6..fd3c7ce1b6 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -29,7 +29,7 @@ async fn main() -> Result<(), ZError> { // Initiate logging zenoh::try_init_log_from_env(); - let (mut config, path, value) = parse_args(); + let (mut config, path, payload) = parse_args(); // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the @@ -74,10 +74,10 @@ async fn main() -> Result<(), ZError> { // of the write. This is simply to have the same format as zn_pub. let prefix = format!("[{idx:4}] "); let prefix_len = prefix.as_bytes().len(); - let slice_len = prefix_len + value.as_bytes().len(); + let slice_len = prefix_len + payload.as_bytes().len(); sbuf[0..prefix_len].copy_from_slice(prefix.as_bytes()); - sbuf[prefix_len..slice_len].copy_from_slice(value.as_bytes()); + sbuf[prefix_len..slice_len].copy_from_slice(payload.as_bytes()); // Write the data println!( @@ -97,13 +97,13 @@ struct Args { /// The key expression to publish onto. path: KeyExpr<'static>, #[arg(short, long, default_value = "Pub from SHM Rust!")] - /// The value of to publish. - value: String, + /// The payload of to publish. 
+ payload: String, #[command(flatten)] common: CommonArgs, } fn parse_args() -> (Config, KeyExpr<'static>, String) { let args = Args::parse(); - (args.common.into(), args.path, args.value) + (args.common.into(), args.path, args.payload) } diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 4fb6e0ca2a..bc4dd88eed 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -20,13 +20,13 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (config, key_expr, value) = parse_args(); + let (config, key_expr, payload) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Putting Data ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).await.unwrap(); + println!("Putting Data ('{key_expr}': '{payload}')..."); + session.put(&key_expr, payload).await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] @@ -35,13 +35,13 @@ struct Args { /// The key expression to write to. key: KeyExpr<'static>, #[arg(short, long, default_value = "Put from Rust!")] - /// The value to write. - value: String, + /// The payload to write. 
+ payload: String, #[command(flatten)] common: CommonArgs, } fn parse_args() -> (Config, KeyExpr<'static>, String) { let args = Args::parse(); - (args.common.into(), args.key, args.value) + (args.common.into(), args.key, args.payload) } diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 89100b3731..35ece437f3 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -20,13 +20,13 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (config, key_expr, value) = parse_args(); + let (config, key_expr, payload) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Putting Float ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).await.unwrap(); + println!("Putting Float ('{key_expr}': '{payload}')..."); + session.put(&key_expr, payload).await.unwrap(); session.close().await.unwrap(); } @@ -37,13 +37,13 @@ struct Args { /// The key expression to write to. key: KeyExpr<'static>, #[arg(short, long, default_value_t = std::f64::consts::PI)] - /// The value to write. - value: f64, + /// The payload to write. + payload: f64, #[command(flatten)] common: CommonArgs, } fn parse_args() -> (Config, KeyExpr<'static>, f64) { let args = Args::parse(); - (args.common.into(), args.key, args.value) + (args.common.into(), args.key, args.payload) } diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index ede3eff635..7857c8caff 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -20,7 +20,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, key_expr, value, complete) = parse_args(); + let (mut config, key_expr, payload, complete) = parse_args(); // A probing procedure for shared memory is performed upon session opening. 
To enable `z_get_shm` to operate // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the @@ -43,27 +43,26 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(query) = queryable.recv_async().await { - match query.value() { + match query.payload() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), - Some(value) => { - let payload = value - .payload() + Some(query_payload) => { + let deserialized_payload = query_payload .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Queryable ] Received Query '{}' with payload '{}'", query.selector(), - payload + deserialized_payload ) } } println!( ">> [Queryable ] Responding ('{}': '{}')", key_expr.as_str(), - value, + payload, ); query - .reply(key_expr.clone(), value.clone()) + .reply(key_expr.clone(), payload.clone()) .await .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); } @@ -75,8 +74,8 @@ struct Args { /// The key expression matching queries to reply to. key: KeyExpr<'static>, #[arg(short, long, default_value = "Queryable from Rust!")] - /// The value to reply to queries. - value: String, + /// The payload to reply to queries. + payload: String, #[arg(long)] /// Declare the queryable as complete w.r.t. the key expression. 
complete: bool, @@ -86,5 +85,5 @@ struct Args { fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { let args = Args::parse(); - (args.common.into(), args.key, args.value, args.complete) + (args.common.into(), args.key, args.payload, args.complete) } diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index c76a031286..75da0379e2 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -30,7 +30,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, key_expr, value, complete) = parse_args(); + let (mut config, key_expr, payload, complete) = parse_args(); // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the @@ -68,9 +68,9 @@ async fn main() { query.selector(), query.key_expr().as_str(), ); - if let Some(payload) = query.payload() { - match payload.deserialize::<&zshm>() { - Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), + if let Some(query_payload) = query.payload() { + match query_payload.deserialize::<&zshm>() { + Ok(p) => print!(": '{}'", String::from_utf8_lossy(p)), Err(e) => print!(": 'Not a ShmBufInner: {:?}'", e), } } @@ -86,12 +86,12 @@ async fn main() { .await .unwrap(); - sbuf[0..value.len()].copy_from_slice(value.as_bytes()); + sbuf[0..payload.len()].copy_from_slice(payload.as_bytes()); println!( ">> [Queryable] Responding ('{}': '{}')", key_expr.as_str(), - value, + payload, ); query .reply(key_expr.clone(), sbuf) @@ -106,8 +106,8 @@ struct Args { /// The key expression matching queries to reply to. key: KeyExpr<'static>, #[arg(short, long, default_value = "Queryable from SHM Rust!")] - /// The value to reply to queries. - value: String, + /// The payload to reply to queries. + payload: String, #[arg(long)] /// Declare the queryable as complete w.r.t. 
the key expression. complete: bool, @@ -117,5 +117,5 @@ struct Args { fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { let args = Args::parse(); - (args.common.into(), args.key, args.value, args.complete) + (args.common.into(), args.key, args.payload, args.complete) } diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 5bc3f47980..89fcd5afd5 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -15,7 +15,7 @@ use std::collections::{hash_map::Entry, HashMap}; use async_std::sync::RwLock; use async_trait::async_trait; -use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; +use zenoh::{internal::Value, key_expr::OwnedKeyExpr, time::Timestamp}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index b3926ab955..5997dc5c65 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml +++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -31,7 +31,7 @@ async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } derive_more = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } +zenoh = { workspace = true, features = ["unstable", "internal"] } zenoh-result = { workspace = true } zenoh-util = { workspace = true } schemars = { workspace = true } diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 913ce35bbf..fac516b7b8 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -29,7 +29,7 @@ //! ``` //! use std::sync::Arc; //! use async_trait::async_trait; -//! use zenoh::{key_expr::OwnedKeyExpr, prelude::*, time::Timestamp, value::Value}; +//! 
use zenoh::{key_expr::OwnedKeyExpr, prelude::*, time::Timestamp, internal::Value}; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; //! @@ -125,9 +125,9 @@ use async_trait::async_trait; use const_format::concatcp; use zenoh::{ core::Result as ZResult, + internal::Value, key_expr::{keyexpr, OwnedKeyExpr}, time::Timestamp, - value::Value, }; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index ee66ae7dbb..a5866e9d74 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,16 +34,16 @@ use zenoh::{ }, key_expr::{keyexpr, KeyExpr}, query::{QueryConsolidation, Reply}, - sample::{Sample, SampleKind, ValueBuilderTrait}, + sample::{EncodingBuilderTrait, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, session::{Session, SessionDeclarations}, - value::Value, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; mod config; pub use config::Config; +use zenoh::query::ReplyError; const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v"); lazy_static::lazy_static! 
{ @@ -95,7 +95,7 @@ fn sample_to_json(sample: &Sample) -> JSONSample { } } -fn result_to_json(sample: Result<&Sample, &Value>) -> JSONSample { +fn result_to_json(sample: Result<&Sample, &ReplyError>) -> JSONSample { match sample { Ok(sample) => sample_to_json(sample), Err(err) => JSONSample { @@ -136,7 +136,7 @@ fn sample_to_html(sample: &Sample) -> String { ) } -fn result_to_html(sample: Result<&Sample, &Value>) -> String { +fn result_to_html(sample: Result<&Sample, &ReplyError>) -> String { match sample { Ok(sample) => sample_to_html(sample), Err(err) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 1534d95e32..e3a9cd9196 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,7 +15,7 @@ use std::{collections::HashMap, sync::Arc}; use async_std::sync::RwLock; use async_trait::async_trait; -use zenoh::{core::Result as ZResult, key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; +use zenoh::{core::Result as ZResult, internal::Value, key_expr::OwnedKeyExpr, time::Timestamp}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, *, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 1dc5d438c8..cbcdefe772 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,8 +21,8 @@ use std::{ use async_std::sync::Arc; use zenoh::{ - key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Selector, time::Timestamp, - value::Value, Session, + internal::Value, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Selector, + time::Timestamp, Session, }; use super::{digest::*, Snapshotter}; diff --git 
a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index fd4b5460a7..2ab41f2880 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,12 +21,12 @@ use std::{ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use zenoh::{ + internal::Value, key_expr::{KeyExpr, OwnedKeyExpr}, prelude::*, sample::{Sample, SampleBuilder}, selector::Selector, time::Timestamp, - value::Value, Session, }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 4087fb3682..04adf6aae5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -26,7 +26,7 @@ use zenoh::{ internal::{ bail, buffers::{SplitBuffer, ZBuf}, - zenoh_home, Timed, TimedEvent, Timer, + zenoh_home, Timed, TimedEvent, Timer, Value, }, key_expr::{ keyexpr_tree::{ @@ -35,11 +35,10 @@ use zenoh::{ KeyExpr, OwnedKeyExpr, }, query::{ConsolidationMode, QueryTarget}, - sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait, ValueBuilderTrait}, + sample::{EncodingBuilderTrait, Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, selector::Selector, session::{Session, SessionDeclarations}, time::{new_timestamp, Timestamp, NTP64}, - value::Value, }; use zenoh_backend_traits::{ config::{GarbageCollectionConfig, StorageConfig}, diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 279bcb072a..88bae4469b 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -20,7 +20,7 @@ use zenoh_protocol::{core::CongestionControl, network::Mapping}; use crate::api::sample::SourceInfo; use crate::api::{ builders::sample::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + 
EncodingBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, }, bytes::{OptionZBytes, ZBytes}, encoding::Encoding, @@ -28,7 +28,6 @@ use crate::api::{ publisher::{Priority, Publisher}, sample::{Locality, SampleKind}, session::SessionRef, - value::Value, }; pub type SessionPutBuilder<'a, 'b> = @@ -114,7 +113,7 @@ impl PublicationBuilder, T> { } } -impl

ValueBuilderTrait for PublicationBuilder { +impl

EncodingBuilderTrait for PublicationBuilder { fn encoding>(self, encoding: T) -> Self { Self { kind: PublicationBuilderPut { @@ -124,26 +123,6 @@ impl

ValueBuilderTrait for PublicationBuilder { ..self } } - - fn payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - Self { - kind: PublicationBuilderPut { - payload: payload.into(), - ..self.kind - }, - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - kind: PublicationBuilderPut { payload, encoding }, - ..self - } - } } impl SampleBuilderTrait for PublicationBuilder { diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 5537cf4326..53cf099448 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -23,7 +23,6 @@ use crate::api::{ key_expr::KeyExpr, publisher::Priority, sample::{QoS, QoSBuilder, Sample, SampleKind}, - value::Value, }; #[cfg(feature = "unstable")] use crate::sample::SourceInfo; @@ -52,14 +51,9 @@ pub trait SampleBuilderTrait { fn attachment>(self, attachment: T) -> Self; } -pub trait ValueBuilderTrait { +pub trait EncodingBuilderTrait { /// Set the [`Encoding`] fn encoding>(self, encoding: T) -> Self; - /// Sets the payload - fn payload>(self, payload: T) -> Self; - /// Sets both payload and encoding at once. 
- /// This is convenient for passing user type which supports `Into` when both payload and encoding depends on user type - fn value>(self, value: T) -> Self; } #[derive(Clone, Debug)] @@ -99,6 +93,14 @@ impl SampleBuilder { _t: PhantomData::, } } + + pub fn payload(mut self, payload: IntoZBytes) -> Self + where + IntoZBytes: Into, + { + self.sample.payload = payload.into(); + self + } } impl SampleBuilder { @@ -210,7 +212,7 @@ impl QoSBuilderTrait for SampleBuilder { } } -impl ValueBuilderTrait for SampleBuilder { +impl EncodingBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { Self { sample: Sample { @@ -220,26 +222,6 @@ impl ValueBuilderTrait for SampleBuilder { _t: PhantomData::, } } - fn payload>(self, payload: T) -> Self { - Self { - sample: Sample { - payload: payload.into(), - ..self.sample - }, - _t: PhantomData::, - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - sample: Sample { - payload, - encoding, - ..self.sample - }, - _t: PhantomData::, - } - } } impl From for SampleBuilder { diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 0b761789b4..4155311de1 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -26,7 +26,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; use super::{ - builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, + builders::sample::{EncodingBuilderTrait, QoSBuilderTrait}, bytes::ZBytes, encoding::Encoding, handlers::{locked, Callback, DefaultHandler, IntoHandler}, @@ -79,27 +79,57 @@ impl Default for QueryConsolidation { } } -/// Structs returned by a [`get`](Session::get). +/// Error returned by a [`get`](Session::get). +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ReplyError { + pub(crate) payload: ZBytes, + pub(crate) encoding: Encoding, +} + +impl ReplyError { + /// Gets the payload of this ReplyError. 
+ #[inline] + pub fn payload(&self) -> &ZBytes { + &self.payload + } + + /// Gets the encoding of this ReplyError. + #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } +} + +impl From for ReplyError { + fn from(value: Value) -> Self { + Self { + payload: value.payload, + encoding: value.encoding, + } + } +} + +/// Struct returned by a [`get`](Session::get). #[non_exhaustive] #[derive(Clone, Debug)] pub struct Reply { - pub(crate) result: Result, + pub(crate) result: Result, pub(crate) replier_id: ZenohIdProto, } impl Reply { /// Gets the a borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. - pub fn result(&self) -> Result<&Sample, &Value> { + pub fn result(&self) -> Result<&Sample, &ReplyError> { self.result.as_ref() } /// Gets the a mutable borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. - pub fn result_mut(&mut self) -> Result<&mut Sample, &mut Value> { + pub fn result_mut(&mut self) -> Result<&mut Sample, &mut ReplyError> { self.result.as_mut() } /// Converts this `Reply` into the its result. Use [`Reply::result`] it you don't want to take ownership. 
- pub fn into_result(self) -> Result { + pub fn into_result(self) -> Result { self.result } @@ -109,7 +139,7 @@ impl Reply { } } -impl From for Result { +impl From for Result { fn from(value: Reply) -> Self { value.into_result() } @@ -198,7 +228,7 @@ impl QoSBuilderTrait for SessionGetBuilder<'_, '_, DefaultHandler> { } } -impl ValueBuilderTrait for SessionGetBuilder<'_, '_, Handler> { +impl EncodingBuilderTrait for SessionGetBuilder<'_, '_, Handler> { fn encoding>(self, encoding: T) -> Self { let mut value = self.value.unwrap_or_default(); value.encoding = encoding.into(); @@ -207,22 +237,6 @@ impl ValueBuilderTrait for SessionGetBuilder<'_, '_, Handler> { ..self } } - - fn payload>(self, payload: T) -> Self { - let mut value = self.value.unwrap_or_default(); - value.payload = payload.into(); - Self { - value: Some(value), - ..self - } - } - fn value>(self, value: T) -> Self { - let value: Value = value.into(); - Self { - value: if value.is_empty() { None } else { Some(value) }, - ..self - } - } } impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { @@ -367,6 +381,17 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { } } impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { + #[inline] + pub fn payload(mut self, payload: IntoZBytes) -> Self + where + IntoZBytes: Into, + { + let mut value = self.value.unwrap_or_default(); + value.payload = payload.into(); + self.value = Some(value); + self + } + /// Change the target of the query. 
#[inline] pub fn target(self, target: QueryTarget) -> Self { diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 4c68b524b2..296964cf9f 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -34,8 +34,8 @@ use { use super::{ builders::sample::{ - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, - ValueBuilderTrait, + EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, + TimestampBuilderTrait, }, bytes::{OptionZBytes, ZBytes}, encoding::Encoding, @@ -99,18 +99,6 @@ impl Query { &self.inner.parameters } - /// This Query's value. - #[inline(always)] - pub fn value(&self) -> Option<&Value> { - self.value.as_ref() - } - - /// This Query's value. - #[inline(always)] - pub fn value_mut(&mut self) -> Option<&mut Value> { - self.value.as_mut() - } - /// This Query's payload. #[inline(always)] pub fn payload(&self) -> Option<&ZBytes> { @@ -360,7 +348,7 @@ impl QoSBuilderTrait for ReplyBuilder<'_, '_, T> { } } -impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { +impl EncodingBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { fn encoding>(self, encoding: T) -> Self { Self { kind: ReplyBuilderPut { @@ -370,23 +358,6 @@ impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { ..self } } - - fn payload>(self, payload: T) -> Self { - Self { - kind: ReplyBuilderPut { - payload: payload.into(), - ..self.kind - }, - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - kind: ReplyBuilderPut { payload, encoding }, - ..self - } - } } impl Resolvable for ReplyBuilder<'_, '_, T> { @@ -500,25 +471,12 @@ pub struct ReplyErrBuilder<'a> { value: Value, } -impl ValueBuilderTrait for ReplyErrBuilder<'_> { +impl EncodingBuilderTrait for ReplyErrBuilder<'_> { fn encoding>(self, encoding: T) -> Self { let mut value = self.value.clone(); value.encoding = encoding.into(); Self { value, ..self } } - - fn payload>(self, 
payload: T) -> Self { - let mut value = self.value.clone(); - value.payload = payload.into(); - Self { value, ..self } - } - - fn value>(self, value: T) -> Self { - Self { - value: value.into(), - ..self - } - } } impl<'a> Resolvable for ReplyErrBuilder<'a> { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 4c613acfb0..d95a9a8910 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1698,7 +1698,7 @@ impl Session { } } (query.callback)(Reply { - result: Err("Timeout".into()), + result: Err(Value::from("Timeout").into()), replier_id: zid.into(), }); } @@ -2199,7 +2199,7 @@ impl Primitives for Session { }; let new_reply = Reply { replier_id, - result: Err(value), + result: Err(value.into()), }; callback(new_reply); } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 375cfd4712..fb9cbae0d1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -223,18 +223,13 @@ pub mod sample { pub use crate::api::sample::SourceInfo; pub use crate::api::{ builders::sample::{ - QoSBuilderTrait, SampleBuilder, SampleBuilderAny, SampleBuilderDelete, - SampleBuilderPut, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderAny, + SampleBuilderDelete, SampleBuilderPut, SampleBuilderTrait, TimestampBuilderTrait, }, sample::{Sample, SampleFields, SampleKind, SourceSn}, }; } -/// Value primitives -pub mod value { - pub use crate::api::value::Value; -} - /// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; @@ -307,7 +302,7 @@ pub mod query { #[zenoh_macros::internal] pub use crate::api::queryable::ReplySample; pub use crate::api::{ - query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply}, + query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply, ReplyError}, queryable::{Query, ReplyBuilder, ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder}, }; } @@ -401,6 +396,8 @@ pub mod internal { PluginsManager, Response, 
RunningPlugin, RunningPluginTrait, ZenohPlugin, PLUGIN_PREFIX, }; } + + pub use crate::api::value::Value; } #[cfg(all(feature = "shared-memory", not(feature = "unstable")))] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 1f78bb71e9..7a9e1a9fd2 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -44,7 +44,7 @@ use super::{routing::dispatcher::face::Face, Runtime}; use crate::api::plugins::PluginsManager; use crate::{ api::{ - builders::sample::ValueBuilderTrait, + builders::sample::EncodingBuilderTrait, bytes::ZBytes, key_expr::KeyExpr, queryable::{Query, QueryInner}, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 605b0638ab..ee6ab290c1 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -30,7 +30,7 @@ mod _prelude { pub use crate::{ api::{ builders::sample::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + EncodingBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, }, session::{SessionDeclarations, Undeclarable}, }, diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 057045ba60..38b1fea136 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -67,8 +67,8 @@ fn attachment_queries() { .declare_queryable("test/attachment") .callback(|query| { let s = query - .value() - .map(|q| q.payload().deserialize::().unwrap()) + .payload() + .map(|p| p.deserialize::().unwrap()) .unwrap_or_default(); println!("Query value: {}", s); @@ -85,10 +85,7 @@ fn attachment_queries() { } query - .reply( - query.key_expr().clone(), - query.value().unwrap().payload().clone(), - ) + .reply(query.key_expr().clone(), query.payload().unwrap().clone()) .attachment(ZBytes::from_iter( attachment .iter::<( diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 5ecdc363d5..640ed33b89 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -67,12 +67,7 @@ fn 
query_with_ringbuffer() { let query = queryable.recv().unwrap(); // Only receive the latest query assert_eq!( - query - .value() - .unwrap() - .payload() - .deserialize::() - .unwrap(), + query.payload().unwrap().deserialize::().unwrap(), "query2" ); } From 222af456a6a003b9e93a9258558b5a58cab4e324 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 17:32:12 +0200 Subject: [PATCH 430/598] cargo fmt --- zenoh/src/api/selector.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index b90b4ed0dd..cedd5158db 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -124,9 +124,9 @@ pub trait PredefinedParameters { /// Sets the parameter allowing to receieve replies from queryables not matching /// the requested key expression. This may happen in this scenario: /// - we are requesting keyexpr `a/b`. - /// - queryable is declared to handle `a/*` requests and contains data for `a/b` and `a/c`. + /// - queryable is declared to handle `a/*` requests and contains data for `a/b` and `a/c`. /// - queryable receives our request and sends two replies with data for `a/b` and `a/c` - /// + /// /// Normally only `a/b` reply would be accepted, but with `_anyke` parameter set, both replies are accepted. fn set_reply_key_expr_any(&mut self); /// Extracts the standardized `_time` argument from the selector parameters. 
From f66d1816399c996a37dd58579c6b6d99b5bbf040 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 12 Jun 2024 17:37:11 +0200 Subject: [PATCH 431/598] accidental renaming fix --- zenoh-ext/src/querying_subscriber.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 2adf4d43ae..54f3ff0224 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -28,6 +28,7 @@ use zenoh::{ prelude::Wait, query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, + selector::Selector, session::{SessionDeclarations, SessionRef}, subscriber::{Reliability, Subscriber}, time::{new_timestamp, Timestamp}, @@ -43,7 +44,7 @@ pub struct QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> { pub(crate) key_space: KeySpace, pub(crate) reliability: Reliability, pub(crate) origin: Locality, - pub(crate) query_selector: Option>>, + pub(crate) query_selector: Option>>, pub(crate) query_target: QueryTarget, pub(crate) query_consolidation: QueryConsolidation, pub(crate) query_accept_replies: ReplyKeyExpr, @@ -178,8 +179,8 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle #[inline] pub fn query_selector(mut self, query_selector: IntoSelector) -> Self where - IntoSelector: TryInto>, - >>::Error: Into, + IntoSelector: TryInto>, + >>::Error: Into, { self.query_selector = Some(query_selector.try_into().map_err(Into::into)); self From 988c9574104cf140f28619740e814dc1e661d763 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:13:49 +0300 Subject: [PATCH 432/598] Remove internal API's from SHM examples (#1129) --- examples/examples/z_bytes_shm.rs | 4 ++-- examples/examples/z_ping_shm.rs | 6 +++--- examples/examples/z_pub_shm_thr.rs | 5 +++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git 
a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 75bf01e3bf..c30710214a 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -43,13 +43,13 @@ fn main() { let _data: &[u8] = &owned_shm_buf_mut; let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + // convert into immutable owned buffer (ZShmMut -> ZShm) let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); // immutable API let _data: &[u8] = &owned_shm_buf; - // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + // convert again into mutable owned buffer (ZShm -> ZShmMut) let mut owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); // mutable and immutable API diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 033fe2d844..5e809c9341 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use clap::Parser; use zenoh::{ - internal::buffers::ZSlice, + bytes::ZBytes, key_expr::keyexpr, prelude::*, publisher::CongestionControl, @@ -70,8 +70,8 @@ fn main() { // NOTE: For buf's API please check z_bytes_shm.rs example let buf = provider.alloc(size).wait().unwrap(); - // convert ZShmMut into ZSlice as ZShmMut does not support Clone - let buf: ZSlice = buf.into(); + // convert ZShmMut into ZBytes as ZShmMut does not support Clone + let buf: ZBytes = buf.into(); // -- warmup -- println!("Warming up for {warmup:?}..."); diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index cff095024e..3093a0962d 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -13,7 +13,7 @@ // use clap::Parser; use zenoh::{ - internal::buffers::ZSlice, + bytes::ZBytes, prelude::*, publisher::CongestionControl, shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, @@ -63,7 +63,8 @@ async fn main() { .await 
.unwrap(); - let buf: ZSlice = buf.into(); + // convert ZShmMut into ZBytes as ZShmMut does not support Clone + let buf: ZBytes = buf.into(); println!("Press CTRL-C to quit..."); loop { From ede0e36fcb54e641eafac8d362b709f5e1c21694 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 11:53:52 +0200 Subject: [PATCH 433/598] Add since attribute to #[deprecated] directive (#1121) * Add since to deprecated directives * Deprecated cleanup --- commons/zenoh-buffers/src/zslice.rs | 2 +- commons/zenoh-core/src/lib.rs | 12 ++++++------ commons/zenoh-util/src/lib.rs | 3 --- examples/examples/z_sub.rs | 1 + zenoh/src/prelude.rs | 4 ++-- 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index c169fcd4c0..43a273c4ad 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -99,7 +99,7 @@ pub struct ZSlice { } impl ZSlice { - #[deprecated(note = "use `new` instead")] + #[deprecated(since = "1.0.0", note = "use `new` instead")] pub fn make( buf: Arc, start: usize, diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index 434d0e6740..37102d619d 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -54,16 +54,16 @@ pub trait Wait: Resolvable { fn wait(self) -> Self::To; } -#[deprecated = "use `.await` directly instead"] +#[deprecated(since = "1.0.0", note = "use `.await` directly instead")] pub trait AsyncResolve: Resolvable { type Future: Future + Send; #[allow(deprecated)] - #[deprecated = "use `.await` directly instead"] + #[deprecated(since = "1.0.0", note = "use `.await` directly instead")] fn res_async(self) -> Self::Future; #[allow(deprecated)] - #[deprecated = "use `.wait()` instead`"] + #[deprecated(since = "1.0.0", note = "use `.wait()` instead`")] fn res(self) -> Self::Future where Self: Sized, @@ -85,13 +85,13 @@ where } } -#[deprecated = "use `.wait()` instead`"] 
+#[deprecated(since = "1.0.0", note = "use `.wait()` instead`")] pub trait SyncResolve: Resolvable { - #[deprecated = "use `.wait()` instead`"] + #[deprecated(since = "1.0.0", note = "use `.wait()` instead`")] fn res_sync(self) -> Self::To; #[allow(deprecated)] - #[deprecated = "use `.wait()` instead`"] + #[deprecated(since = "1.0.0", note = "use `.wait()` instead`")] fn res(self) -> Self::To where Self: Sized, diff --git a/commons/zenoh-util/src/lib.rs b/commons/zenoh-util/src/lib.rs index 7e02096ebb..4b5da75548 100644 --- a/commons/zenoh-util/src/lib.rs +++ b/commons/zenoh-util/src/lib.rs @@ -34,9 +34,6 @@ macro_rules! concat_enabled_features { }; } -#[deprecated = "This module is now a separate crate. Use the `zenoh_core` crate directly for shorter compile-times. You may disable this re-export by disabling `zenoh-util`'s default features."] -pub use zenoh_core as core; - #[cfg(feature = "std")] mod std_only; diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 47432cf9cb..8ecc4b9818 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -39,6 +39,7 @@ async fn main() { .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); + print!( ">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index ee6ab290c1..a6b9aa592e 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -48,14 +48,14 @@ pub use crate::core::SyncResolve; pub use crate::core::Wait; /// Prelude to import when using Zenoh's sync API. -#[deprecated = "use `zenoh::prelude` instead"] +#[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod sync { pub use super::_prelude::*; #[allow(deprecated)] pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. 
-#[deprecated = "use `zenoh::prelude` instead"] +#[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod r#async { pub use super::_prelude::*; #[allow(deprecated)] From dbb25d7b0ee049e2c0f851145d5da9f5a186f5bc Mon Sep 17 00:00:00 2001 From: C Schleich Date: Thu, 13 Jun 2024 11:59:35 +0200 Subject: [PATCH 434/598] Add Default for SourceInfo (#1132) Co-authored-by: Alexander Bushnev From 0006d95c70fb973a9373280732a1faeff50d7d70 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 12:06:28 +0200 Subject: [PATCH 435/598] Properties Variable renamed to parameters --- commons/zenoh-protocol/src/core/parameters.rs | 323 ++++++++++- .../src/core/parameters/properties.rs | 530 ------------------ .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 20 +- zenoh/src/api/bytes.rs | 4 +- 5 files changed, 332 insertions(+), 547 deletions(-) delete mode 100644 commons/zenoh-protocol/src/core/parameters/properties.rs diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 101bf9d925..20bcdd9aef 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -20,15 +20,19 @@ /// `=` is the separator between the `&str`-key and `&str`-value /// /// `|` is the separator between multiple elements of the values. -mod properties; -pub use properties::Parameters; +use alloc::{ + borrow::Cow, + string::{String, ToString}, + vec::Vec, +}; +use core::{borrow::Borrow, fmt}; +#[cfg(feature = "std")] +use std::collections::HashMap; pub(super) const LIST_SEPARATOR: char = ';'; pub(super) const FIELD_SEPARATOR: char = '='; pub(super) const VALUE_SEPARATOR: char = '|'; -use alloc::{string::String, vec::Vec}; - fn split_once(s: &str, c: char) -> (&str, &str) { match s.find(c) { Some(index) => { @@ -208,3 +212,314 @@ pub fn rand(into: &mut String) { into.push_str(value.as_str()); } } + +/// A map of key/value (String,String) parameters. 
+/// It can be parsed from a String, using `;` or `` as separator between each parameters +/// and `=` as separator between a key and its value. Keys and values are trimed. +/// +/// Example: +/// ``` +/// use zenoh_protocol::core::Parameters; +/// +/// let a = "a=1;b=2;c=3|4|5;d=6"; +/// let p = Parameters::from(a); +/// +/// // Retrieve values +/// assert!(!p.is_empty()); +/// assert_eq!(p.get("a").unwrap(), "1"); +/// assert_eq!(p.get("b").unwrap(), "2"); +/// assert_eq!(p.get("c").unwrap(), "3|4|5"); +/// assert_eq!(p.get("d").unwrap(), "6"); +/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); +/// +/// // Iterate over parameters +/// let mut iter = p.iter(); +/// assert_eq!(iter.next().unwrap(), ("a", "1")); +/// assert_eq!(iter.next().unwrap(), ("b", "2")); +/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); +/// assert_eq!(iter.next().unwrap(), ("d", "6")); +/// assert!(iter.next().is_none()); +/// +/// // Create parameters from iterators +/// let pi = Parameters::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); +/// assert_eq!(p, pi); +/// ``` +#[derive(Clone, PartialEq, Eq, Hash, Default)] +pub struct Parameters<'s>(Cow<'s, str>); + +impl<'s> Parameters<'s> { + /// Create empty parameters. + pub const fn empty() -> Self { + Self(Cow::Borrowed("")) + } + + /// Returns `true` if parameters does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns parameters as [`str`]. + pub fn as_str(&'s self) -> &'s str { + &self.0 + } + + /// Returns `true` if parameters contains the specified key. + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + super::parameters::get(self.as_str(), k.borrow()).is_some() + } + + /// Returns a reference to the `&str`-value corresponding to the key. 
+ pub fn get(&'s self, k: K) -> Option<&'s str> + where + K: Borrow, + { + super::parameters::get(self.as_str(), k.borrow()) + } + + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + super::parameters::values(self.as_str(), k.borrow()) + } + + /// Returns an iterator on the key-value pairs as `(&str, &str)`. + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + super::parameters::iter(self.as_str()) + } + + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let (inner, item) = super::parameters::insert(self.as_str(), k.borrow(), v.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); + item + } + + /// Removes a key from the map, returning the value at the key if the key was previously in the parameters. + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + let (inner, item) = super::parameters::remove(self.as_str(), k.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); + item + } + + /// Extend these parameters with other parameters. + pub fn extend(&mut self, other: &Parameters) { + self.extend_from_iter(other.iter()); + } + + /// Extend these parameters from an iterator. + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) + where + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, + { + let inner = super::parameters::from_iter(super::parameters::join( + self.iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + )); + self.0 = Cow::Owned(inner); + } + + /// Convert these parameters into owned parameters. 
+ pub fn into_owned(self) -> Parameters<'static> { + Parameters(Cow::Owned(self.0.into_owned())) + } + + /// Returns `true`` if all keys are sorted in alphabetical order. + pub fn is_ordered(&self) -> bool { + super::parameters::is_ordered(self.as_str()) + } +} + +impl<'s> From<&'s str> for Parameters<'s> { + fn from(mut value: &'s str) -> Self { + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + Self(Cow::Borrowed(value)) + } +} + +impl From for Parameters<'_> { + fn from(mut value: String) -> Self { + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); + Self(Cow::Owned(value)) + } +} + +impl<'s> From> for Parameters<'s> { + fn from(value: Cow<'s, str>) -> Self { + match value { + Cow::Borrowed(s) => Parameters::from(s), + Cow::Owned(s) => Parameters::from(s), + } + } +} + +impl<'a> From> for Cow<'_, Parameters<'a>> { + fn from(props: Parameters<'a>) -> Self { + Cow::Owned(props) + } +} + +impl<'a> From<&'a Parameters<'a>> for Cow<'a, Parameters<'a>> { + fn from(props: &'a Parameters<'a>) -> Self { + Cow::Borrowed(props) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Parameters<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + let iter = iter.into_iter(); + let inner = super::parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + Self(Cow::Owned(inner)) + } +} + +impl<'s, K, V> FromIterator<&'s (K, V)> for Parameters<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for Parameters<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for Parameters<'_> +where + K: 
Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from_iter(props.iter()) + } +} + +#[cfg(feature = "std")] +impl From<&Parameters<'_>> for HashMap { + fn from(props: &Parameters<'_>) -> Self { + HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s Parameters<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: Parameters) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for Parameters<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for Parameters<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parameters() { + assert!(Parameters::from("").0.is_empty()); + + assert_eq!(Parameters::from("p1"), Parameters::from(&[("p1", "")][..])); + + assert_eq!( + Parameters::from("p1=v1"), + Parameters::from(&[("p1", "v1")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2=v2;"), + Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2=v2;|="), + Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2;p3=v3"), + Parameters::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) + ); + + assert_eq!( + Parameters::from("p1=v 1;p 2=v2"), + Parameters::from(&[("p1", "v 1"), ("p 2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=x=y;p2=a==b"), + Parameters::from(&[("p1", "x=y"), ("p2", "a==b")][..]) + ); + + let mut hm: HashMap = HashMap::new(); 
+ hm.insert("p1".to_string(), "v1".to_string()); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap<&str, &str> = HashMap::new(); + hm.insert("p1", "v1"); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap, Cow> = HashMap::new(); + hm.insert(Cow::from("p1"), Cow::from("v1")); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + } +} diff --git a/commons/zenoh-protocol/src/core/parameters/properties.rs b/commons/zenoh-protocol/src/core/parameters/properties.rs deleted file mode 100644 index 4856edf7af..0000000000 --- a/commons/zenoh-protocol/src/core/parameters/properties.rs +++ /dev/null @@ -1,530 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use alloc::{ - borrow::Cow, - string::{String, ToString}, -}; -use core::{borrow::Borrow, fmt}; -#[cfg(feature = "std")] -use std::collections::HashMap; - -use super::{FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; -use crate::core::parameters; - -/// A map of key/value (String,String) properties. -/// It can be parsed from a String, using `;` or `` as separator between each properties -/// and `=` as separator between a key and its value. Keys and values are trimed. 
-/// -/// Example: -/// ``` -/// use zenoh_protocol::core::Parameters; -/// -/// let a = "a=1;b=2;c=3|4|5;d=6"; -/// let p = Parameters::from(a); -/// -/// // Retrieve values -/// assert!(!p.is_empty()); -/// assert_eq!(p.get("a").unwrap(), "1"); -/// assert_eq!(p.get("b").unwrap(), "2"); -/// assert_eq!(p.get("c").unwrap(), "3|4|5"); -/// assert_eq!(p.get("d").unwrap(), "6"); -/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); -/// -/// // Iterate over properties -/// let mut iter = p.iter(); -/// assert_eq!(iter.next().unwrap(), ("a", "1")); -/// assert_eq!(iter.next().unwrap(), ("b", "2")); -/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); -/// assert_eq!(iter.next().unwrap(), ("d", "6")); -/// assert!(iter.next().is_none()); -/// -/// // Create properties from iterators -/// let pi = Parameters::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); -/// assert_eq!(p, pi); -/// ``` -#[derive(Clone, PartialEq, Eq, Hash, Default)] -pub struct Parameters<'s>(Cow<'s, str>); - -impl<'s> Parameters<'s> { - /// Returns `true` if properties does not contain anything. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns properties as [`str`]. - pub fn as_str(&'s self) -> &'s str { - &self.0 - } - - /// Returns `true` if properties contains the specified key. - pub fn contains_key(&self, k: K) -> bool - where - K: Borrow, - { - parameters::get(self.as_str(), k.borrow()).is_some() - } - - /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&'s self, k: K) -> Option<&'s str> - where - K: Borrow, - { - parameters::get(self.as_str(), k.borrow()) - } - - /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&'s self, k: K) -> impl DoubleEndedIterator - where - K: Borrow, - { - parameters::values(self.as_str(), k.borrow()) - } - - /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
- pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - parameters::iter(self.as_str()) - } - - /// Inserts a key-value pair into the map. - /// If the map did not have this key present, [`None`]` is returned. - /// If the map did have this key present, the value is updated, and the old value is returned. - pub fn insert(&mut self, k: K, v: V) -> Option - where - K: Borrow, - V: Borrow, - { - let (inner, item) = parameters::insert(self.as_str(), k.borrow(), v.borrow()); - let item = item.map(|i| i.to_string()); - self.0 = Cow::Owned(inner); - item - } - - /// Removes a key from the map, returning the value at the key if the key was previously in the properties. - pub fn remove(&mut self, k: K) -> Option - where - K: Borrow, - { - let (inner, item) = parameters::remove(self.as_str(), k.borrow()); - let item = item.map(|i| i.to_string()); - self.0 = Cow::Owned(inner); - item - } - - /// Extend these properties with other properties. - pub fn extend(&mut self, other: &Parameters) { - self.extend_from_iter(other.iter()); - } - - /// Extend these properties from an iterator. - pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) - where - I: Iterator + Clone, - K: Borrow + 'e + ?Sized, - V: Borrow + 'e + ?Sized, - { - let inner = parameters::from_iter(parameters::join( - self.iter(), - iter.map(|(k, v)| (k.borrow(), v.borrow())), - )); - self.0 = Cow::Owned(inner); - } - - /// Convert these properties into owned properties. - pub fn into_owned(self) -> Parameters<'static> { - Parameters(Cow::Owned(self.0.into_owned())) - } - - /// Returns `true`` if all keys are sorted in alphabetical order. 
- pub fn is_ordered(&self) -> bool { - parameters::is_ordered(self.as_str()) - } -} - -impl<'s> From<&'s str> for Parameters<'s> { - fn from(mut value: &'s str) -> Self { - value = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - Self(Cow::Borrowed(value)) - } -} - -impl From for Parameters<'_> { - fn from(mut value: String) -> Self { - let s = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - value.truncate(s.len()); - Self(Cow::Owned(value)) - } -} - -impl<'s> From> for Parameters<'s> { - fn from(value: Cow<'s, str>) -> Self { - match value { - Cow::Borrowed(s) => Parameters::from(s), - Cow::Owned(s) => Parameters::from(s), - } - } -} - -impl<'a> From> for Cow<'_, Parameters<'a>> { - fn from(props: Parameters<'a>) -> Self { - Cow::Owned(props) - } -} - -impl<'a> From<&'a Parameters<'a>> for Cow<'a, Parameters<'a>> { - fn from(props: &'a Parameters<'a>) -> Self { - Cow::Borrowed(props) - } -} - -impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Parameters<'_> -where - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, -{ - fn from_iter>(iter: T) -> Self { - let iter = iter.into_iter(); - let inner = parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - Self(Cow::Owned(inner)) - } -} - -impl<'s, K, V> FromIterator<&'s (K, V)> for Parameters<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from_iter>(iter: T) -> Self { - Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) - } -} - -impl<'s, K, V> From<&'s [(K, V)]> for Parameters<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from(value: &'s [(K, V)]) -> Self { - Self::from_iter(value.iter()) - } -} - -#[cfg(feature = "std")] -impl From> for Parameters<'_> -where - K: Borrow, - V: Borrow, -{ - fn from(map: HashMap) -> Self { - Self::from_iter(map.iter()) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { 
- fn from(props: &'s Parameters<'s>) -> Self { - HashMap::from_iter(props.iter()) - } -} - -#[cfg(feature = "std")] -impl From<&Parameters<'_>> for HashMap { - fn from(props: &Parameters<'_>) -> Self { - HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s Parameters<'s>> for HashMap, Cow<'s, str>> { - fn from(props: &'s Parameters<'s>) -> Self { - HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) - } -} - -#[cfg(feature = "std")] -impl From> for HashMap { - fn from(props: Parameters) -> Self { - HashMap::from(&props) - } -} - -impl fmt::Display for Parameters<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl fmt::Debug for Parameters<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -#[derive(Clone, PartialEq, Eq, Hash, Default)] -pub struct OrderedProperties<'s>(Parameters<'s>); - -impl<'s> OrderedProperties<'s> { - /// Returns `true` if properties does not contain anything. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns properties as [`str`]. - pub fn as_str(&'s self) -> &'s str { - self.0.as_str() - } - - /// Returns `true` if properties contains the specified key. - pub fn contains_key(&self, k: K) -> bool - where - K: Borrow, - { - self.0.contains_key(k) - } - - /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&'s self, k: K) -> Option<&'s str> - where - K: Borrow, - { - self.0.get(k) - } - - /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&'s self, k: K) -> impl DoubleEndedIterator - where - K: Borrow, - { - self.0.values(k) - } - - /// Returns an iterator on the key-value pairs as `(&str, &str)`. 
- pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { - self.0.iter() - } - - /// Removes a key from the map, returning the value at the key if the key was previously in the properties. - pub fn remove(&mut self, k: K) -> Option - where - K: Borrow, - { - self.0.remove(k) - } - - /// Inserts a key-value pair into the map. - /// If the map did not have this key present, [`None`]` is returned. - /// If the map did have this key present, the value is updated, and the old value is returned. - pub fn insert(&mut self, k: K, v: V) -> Option - where - K: Borrow, - V: Borrow, - { - let item = self.0.insert(k, v); - self.order(); - item - } - - /// Extend these properties with other properties. - pub fn extend(&mut self, other: &Parameters) { - self.extend_from_iter(other.iter()); - } - - /// Extend these properties from an iterator. - pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) - where - I: Iterator + Clone, - K: Borrow + 'e + ?Sized, - V: Borrow + 'e + ?Sized, - { - self.0.extend_from_iter(iter); - self.order(); - } - - /// Convert these properties into owned properties. 
- pub fn into_owned(self) -> OrderedProperties<'static> { - OrderedProperties(self.0.into_owned()) - } - - fn order(&mut self) { - if !self.0.is_ordered() { - self.0 = Parameters(Cow::Owned(parameters::from_iter(parameters::sort( - self.iter(), - )))); - } - } -} - -impl<'s> From> for OrderedProperties<'s> { - fn from(value: Parameters<'s>) -> Self { - let mut props = Self(value); - props.order(); - props - } -} - -impl<'s> From<&'s str> for OrderedProperties<'s> { - fn from(value: &'s str) -> Self { - Self::from(Parameters::from(value)) - } -} - -impl From for OrderedProperties<'_> { - fn from(value: String) -> Self { - Self::from(Parameters::from(value)) - } -} - -impl<'s> From> for OrderedProperties<'s> { - fn from(value: Cow<'s, str>) -> Self { - Self::from(Parameters::from(value)) - } -} - -impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> -where - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, -{ - fn from_iter>(iter: T) -> Self { - Self::from(Parameters::from_iter(iter)) - } -} - -impl<'s, K, V> FromIterator<&'s (K, V)> for OrderedProperties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from_iter>(iter: T) -> Self { - Self::from(Parameters::from_iter(iter)) - } -} - -impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> -where - K: Borrow + 's, - V: Borrow + 's, -{ - fn from(value: &'s [(K, V)]) -> Self { - Self::from_iter(value.iter()) - } -} - -#[cfg(feature = "std")] -impl From> for OrderedProperties<'_> -where - K: Borrow, - V: Borrow, -{ - fn from(map: HashMap) -> Self { - Self::from_iter(map.iter()) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { - fn from(props: &'s OrderedProperties<'s>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl From<&OrderedProperties<'_>> for HashMap { - fn from(props: &OrderedProperties<'_>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl<'s> From<&'s 
OrderedProperties<'s>> for HashMap, Cow<'s, str>> { - fn from(props: &'s OrderedProperties<'s>) -> Self { - HashMap::from(&props.0) - } -} - -#[cfg(feature = "std")] -impl From> for HashMap { - fn from(props: OrderedProperties) -> Self { - HashMap::from(&props) - } -} - -impl fmt::Display for OrderedProperties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl fmt::Debug for OrderedProperties<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_properties() { - assert!(Parameters::from("").0.is_empty()); - - assert_eq!(Parameters::from("p1"), Parameters::from(&[("p1", "")][..])); - - assert_eq!( - Parameters::from("p1=v1"), - Parameters::from(&[("p1", "v1")][..]) - ); - - assert_eq!( - Parameters::from("p1=v1;p2=v2;"), - Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Parameters::from("p1=v1;p2=v2;|="), - Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Parameters::from("p1=v1;p2;p3=v3"), - Parameters::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) - ); - - assert_eq!( - Parameters::from("p1=v 1;p 2=v2"), - Parameters::from(&[("p1", "v 1"), ("p 2", "v2")][..]) - ); - - assert_eq!( - Parameters::from("p1=x=y;p2=a==b"), - Parameters::from(&[("p1", "x=y"), ("p2", "a==b")][..]) - ); - - let mut hm: HashMap = HashMap::new(); - hm.insert("p1".to_string(), "v1".to_string()); - assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); - - let mut hm: HashMap<&str, &str> = HashMap::new(); - hm.insert("p1", "v1"); - assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); - - let mut hm: HashMap, Cow> = HashMap::new(); - hm.insert(Cow::from("p1"), Cow::from("v1")); - assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); - } -} diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 51c674cb1b..069bcb6034 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -189,7 +189,7 @@ impl AlignQueryable { } fn parse_parameters(&self, parameters: &Parameters) -> Option { - tracing::trace!("[ALIGN QUERYABLE] Properties are: {:?}", parameters); + tracing::trace!("[ALIGN QUERYABLE] Parameters are: {:?}", parameters); if parameters.contains_key(super::ERA) { Some(AlignComponent::Era( EraType::from_str(parameters.get(super::ERA).unwrap()).unwrap(), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 7022885d2a..c092c6629e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -137,13 +137,13 @@ impl Aligner { from: &str, ) -> (HashMap, bool) { let mut result = HashMap::new(); - let properties = format!( + let parameters = format!( "timestamp={}&{}={}", timestamp, CONTENTS, serde_json::to_string(missing_content).unwrap() ); - let (replies, no_err) = self.perform_query(from, properties.clone()).await; + let (replies, no_err) = self.perform_query(from, parameters.clone()).await; for sample in replies { result.insert( @@ -211,8 +211,8 @@ impl Aligner { other_rep: &str, ) -> (HashSet, bool) { let (other_intervals, no_err) = if era.eq(&EraType::Cold) { - let properties = format!("timestamp={}&{}=cold", other.timestamp, ERA); - let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; + let parameters = format!("timestamp={}&{}=cold", other.timestamp, ERA); + let (reply_content, mut no_err) = self.perform_query(other_rep, parameters).await; let mut other_intervals: HashMap = HashMap::new(); // expecting sample.payload to be a vec of intervals with their checksum for each in reply_content { 
@@ -252,14 +252,14 @@ impl Aligner { for each_int in diff_intervals { diff_string.push(each_int.to_string()); } - let properties = format!( + let parameters = format!( "timestamp={}&{}=[{}]", other.timestamp, INTERVALS, diff_string.join(",") ); // expecting sample.payload to be a vec of subintervals with their checksum - let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; + let (reply_content, mut no_err) = self.perform_query(other_rep, parameters).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { match serde_json::from_reader(each.payload().reader()) { @@ -293,14 +293,14 @@ impl Aligner { for each_sub in diff_subintervals { diff_string.push(each_sub.to_string()); } - let properties = format!( + let parameters = format!( "timestamp={}&{}=[{}]", other.timestamp, SUBINTERVALS, diff_string.join(",") ); // expecting sample.payload to be a vec of log entries with their checksum - let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; + let (reply_content, mut no_err) = self.perform_query(other_rep, parameters).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { match serde_json::from_reader(each.payload().reader()) { @@ -321,11 +321,11 @@ impl Aligner { } } - async fn perform_query(&self, from: &str, properties: String) -> (Vec, bool) { + async fn perform_query(&self, from: &str, parameters: String) -> (Vec, bool) { let mut no_err = true; let selector = Selector::owned( KeyExpr::from(&self.digest_key).join(&from).unwrap(), - properties, + parameters, ); tracing::trace!("[ALIGNER] Sending Query '{}'...", selector); let mut return_val = Vec::new(); diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index fdb324c77d..cea9720c1f 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1142,8 +1142,8 @@ impl TryFrom<&mut ZBytes> for bool { } } -// - Zenoh advanced types encoders/decoders -// Properties +// - Zenoh advanced 
types serializer/deserializer +// Parameters impl Serialize> for ZSerde { type Output = ZBytes; From 7632742fa275dc16b919b0ed6d5a3c477ed2c8f0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 12:29:55 +0200 Subject: [PATCH 436/598] Fix z_get_shm --- examples/examples/z_get_shm.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 6f69598c02..71a3e3aa65 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -15,9 +15,8 @@ use std::time::Duration; use clap::Parser; use zenoh::{ - key_expr::KeyExpr, - prelude::*, query::QueryTarget, + selector::Selector, shm::{ zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, @@ -128,7 +127,7 @@ struct Args { fn parse_args() -> ( Config, - KeyExpr<'static>, + Selector<'static>, Option, QueryTarget, Duration, From 328890051914221cf627e4c471a5ee263cfadded Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 12:33:40 +0200 Subject: [PATCH 437/598] Improve docs --- zenoh/src/api/selector.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index cedd5158db..fdda4117d7 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -124,10 +124,12 @@ pub trait PredefinedParameters { /// Sets the parameter allowing to receieve replies from queryables not matching /// the requested key expression. This may happen in this scenario: /// - we are requesting keyexpr `a/b`. - /// - queryable is declared to handle `a/*` requests and contains data for `a/b` and `a/c`. + /// - queryable is declared to handle `a/*` queries and contains data for `a/b` and `a/c`. /// - queryable receives our request and sends two replies with data for `a/b` and `a/c` /// /// Normally only `a/b` reply would be accepted, but with `_anyke` parameter set, both replies are accepted. 
+ /// NOTE: `_anyke` indicates that ANY key expression is allowed. I.e., if `_anyke` parameter is set, a reply + /// on `x/y/z` is valid even if the queryable is declared on `a/*`. fn set_reply_key_expr_any(&mut self); /// Extracts the standardized `_time` argument from the selector parameters. /// Returns `None` if the `_time` argument is not present or `Some` with the result of parsing the `_time` argument From 69619b6938465df88ec75106d3bc8d2d32b73616 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 13 Jun 2024 12:35:01 +0200 Subject: [PATCH 438/598] restored selector parameter in example --- examples/examples/z_get_shm.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 6f69598c02..ef48e26322 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -15,14 +15,10 @@ use std::time::Duration; use clap::Parser; use zenoh::{ - key_expr::KeyExpr, - prelude::*, - query::QueryTarget, - shm::{ + key_expr::KeyExpr, prelude::*, query::QueryTarget, selector::Selector, shm::{ zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, - }, - Config, + }, Config }; use zenoh_examples::CommonArgs; @@ -128,7 +124,7 @@ struct Args { fn parse_args() -> ( Config, - KeyExpr<'static>, + Selector<'static>, Option, QueryTarget, Duration, From 73bde69262d6490b6b768aa365d0d364eb5c3f01 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 12:59:32 +0200 Subject: [PATCH 439/598] Rename PredefinedParameters to ZenohParameters --- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/selector.rs | 6 +++--- zenoh/src/api/session.rs | 2 +- zenoh/src/lib.rs | 2 +- zenoh/src/prelude.rs | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs 
b/plugins/zenoh-plugin-rest/src/lib.rs index 42b1731afc..86742d67eb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::{ key_expr::{keyexpr, KeyExpr}, query::{QueryConsolidation, Reply}, sample::{EncodingBuilderTrait, Sample, SampleKind}, - selector::{Parameters, PredefinedParameters, Selector}, + selector::{Parameters, Selector, ZenohParameters}, session::{Session, SessionDeclarations}, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index a6351ef202..279d071ee5 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -26,7 +26,7 @@ use zenoh::{ query::Query, queryable::Queryable, sample::{Locality, Sample}, - selector::PredefinedParameters, + selector::ZenohParameters, session::{SessionDeclarations, SessionRef}, subscriber::FlumeSubscriber, }; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 8bec460b99..9fb3c65ebf 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -33,7 +33,7 @@ use super::{ key_expr::KeyExpr, publisher::Priority, sample::{Locality, QoSBuilder, Sample}, - selector::{PredefinedParameters, Selector}, + selector::{ZenohParameters, Selector}, session::Session, value::Value, }; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 5524a93295..b912d358bb 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -33,7 +33,7 @@ use { }; #[zenoh_macros::unstable] -use super::selector::PredefinedParameters; +use super::selector::ZenohParameters; use super::{ builders::sample::{ EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index fdda4117d7..6cd55e035e 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -116,7 +116,7 @@ impl<'a> From<&'a 
Selector<'a>> for (&'a KeyExpr<'a>, &'a Parameters<'a>) { } #[zenoh_macros::unstable] -pub trait PredefinedParameters { +pub trait ZenohParameters { const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; const TIME_RANGE_KEY: &'static str = "_time"; /// Sets the time range targeted by the selector parameters. @@ -140,7 +140,7 @@ pub trait PredefinedParameters { } #[cfg(not(feature = "unstable"))] -pub(crate) trait PredefinedParameters { +pub(crate) trait ZenohParameters { const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; const TIME_RANGE_KEY: &'static str = "_time"; fn set_time_range>>(&mut self, time_range: T); @@ -149,7 +149,7 @@ pub(crate) trait PredefinedParameters { fn reply_key_expr_any(&self) -> bool; } -impl PredefinedParameters for Parameters<'_> { +impl ZenohParameters for Parameters<'_> { /// Sets the time range targeted by the selector parameters. fn set_time_range>>(&mut self, time_range: T) { let mut time_range: Option = time_range.into(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index eb3ce6d95c..3fbdc4e69e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -90,7 +90,7 @@ use super::{ sample::SourceInfo, }; use crate::{ - api::selector::PredefinedParameters, + api::selector::ZenohParameters, net::{ primitives::Primitives, routing::dispatcher::face::Face, diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index b11a91b00c..cf328820af 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -250,7 +250,7 @@ pub mod selector { pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; #[zenoh_macros::unstable] - pub use crate::api::selector::PredefinedParameters; + pub use crate::api::selector::ZenohParameters; pub use crate::api::selector::Selector; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 1fa5e16dfb..63cb397e38 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -28,7 +28,7 @@ mod _prelude { #[zenoh_macros::unstable] pub use 
crate::api::publisher::PublisherDeclarations; #[zenoh_macros::unstable] - pub use crate::api::selector::PredefinedParameters; + pub use crate::api::selector::ZenohParameters; pub use crate::{ api::{ builders::sample::{ From 9250f757c4f64b7d442d043ce69b38b1d530454b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 13 Jun 2024 13:12:06 +0200 Subject: [PATCH 440/598] ZenohParameters renaming, comment added --- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/selector.rs | 10 +++++++--- zenoh/src/api/session.rs | 2 +- zenoh/src/lib.rs | 2 +- zenoh/src/prelude.rs | 2 +- 8 files changed, 14 insertions(+), 10 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 42b1731afc..c8a24e9c77 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::{ key_expr::{keyexpr, KeyExpr}, query::{QueryConsolidation, Reply}, sample::{EncodingBuilderTrait, Sample, SampleKind}, - selector::{Parameters, PredefinedParameters, Selector}, + selector::{Parameters, ZenohParameters, Selector}, session::{Session, SessionDeclarations}, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index a6351ef202..279d071ee5 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -26,7 +26,7 @@ use zenoh::{ query::Query, queryable::Queryable, sample::{Locality, Sample}, - selector::PredefinedParameters, + selector::ZenohParameters, session::{SessionDeclarations, SessionRef}, subscriber::FlumeSubscriber, }; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 8bec460b99..9fb3c65ebf 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -33,7 +33,7 @@ use super::{ key_expr::KeyExpr, 
publisher::Priority, sample::{Locality, QoSBuilder, Sample}, - selector::{PredefinedParameters, Selector}, + selector::{ZenohParameters, Selector}, session::Session, value::Value, }; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 5524a93295..b912d358bb 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -33,7 +33,7 @@ use { }; #[zenoh_macros::unstable] -use super::selector::PredefinedParameters; +use super::selector::ZenohParameters; use super::{ builders::sample::{ EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index fdda4117d7..189f7703d0 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -116,7 +116,11 @@ impl<'a> From<&'a Selector<'a>> for (&'a KeyExpr<'a>, &'a Parameters<'a>) { } #[zenoh_macros::unstable] -pub trait PredefinedParameters { +/// The trait allows to set/read parameters processed by the zenoh library itself +pub trait ZenohParameters { + /// Text parameter names are not part of the public API. They exposed just to provide information about current parameters + /// namings, allowing user to avoid conflicts with custom parameters. It's also possible that some of these zenoh-specific parameters + /// which now are stored in the key-value pairs will be later passed in some other way, keeping the same get/set interface functions. const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; const TIME_RANGE_KEY: &'static str = "_time"; /// Sets the time range targeted by the selector parameters. 
@@ -140,7 +144,7 @@ pub trait PredefinedParameters { } #[cfg(not(feature = "unstable"))] -pub(crate) trait PredefinedParameters { +pub(crate) trait ZenohParameters { const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; const TIME_RANGE_KEY: &'static str = "_time"; fn set_time_range>>(&mut self, time_range: T); @@ -149,7 +153,7 @@ pub(crate) trait PredefinedParameters { fn reply_key_expr_any(&self) -> bool; } -impl PredefinedParameters for Parameters<'_> { +impl ZenohParameters for Parameters<'_> { /// Sets the time range targeted by the selector parameters. fn set_time_range>>(&mut self, time_range: T) { let mut time_range: Option = time_range.into(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index eb3ce6d95c..3fbdc4e69e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -90,7 +90,7 @@ use super::{ sample::SourceInfo, }; use crate::{ - api::selector::PredefinedParameters, + api::selector::ZenohParameters, net::{ primitives::Primitives, routing::dispatcher::face::Face, diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index b11a91b00c..cf328820af 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -250,7 +250,7 @@ pub mod selector { pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; #[zenoh_macros::unstable] - pub use crate::api::selector::PredefinedParameters; + pub use crate::api::selector::ZenohParameters; pub use crate::api::selector::Selector; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 1fa5e16dfb..63cb397e38 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -28,7 +28,7 @@ mod _prelude { #[zenoh_macros::unstable] pub use crate::api::publisher::PublisherDeclarations; #[zenoh_macros::unstable] - pub use crate::api::selector::PredefinedParameters; + pub use crate::api::selector::ZenohParameters; pub use crate::{ api::{ builders::sample::{ From cee80ba49370f1b22ff407c7f34ee1b0cef217e4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 13 Jun 2024 13:17:51 
+0200 Subject: [PATCH 441/598] cargio fmt --- zenoh/src/api/query.rs | 2 +- zenoh/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 9fb3c65ebf..dc0f00300d 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -33,7 +33,7 @@ use super::{ key_expr::KeyExpr, publisher::Priority, sample::{Locality, QoSBuilder, Sample}, - selector::{ZenohParameters, Selector}, + selector::{Selector, ZenohParameters}, session::Session, value::Value, }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index cf328820af..a522375cc1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -249,9 +249,9 @@ pub mod selector { #[zenoh_macros::unstable] pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + pub use crate::api::selector::Selector; #[zenoh_macros::unstable] pub use crate::api::selector::ZenohParameters; - pub use crate::api::selector::Selector; } /// Subscribing primitives From f4bfcc052cc9fc9dbc5ea005944728f08612498f Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 13:38:23 +0200 Subject: [PATCH 442/598] fix: fix proto wrappers debug impl --- commons/zenoh-config/src/wrappers.rs | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index bf8570de35..cb78812c86 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -25,12 +25,15 @@ use zenoh_protocol::{ }; /// The global unique id of a zenoh peer. 
-#[derive( - Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default, -)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default)] #[repr(transparent)] pub struct ZenohId(ZenohIdProto); +impl fmt::Debug for ZenohId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} impl fmt::Display for ZenohId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) @@ -64,6 +67,7 @@ impl FromStr for ZenohId { } /// A zenoh Hello message. +#[repr(transparent)] pub struct Hello(HelloProto); impl Hello { @@ -89,7 +93,7 @@ impl From for Hello { } } -impl fmt::Display for Hello { +impl fmt::Debug for Hello { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Hello") .field("zid", &self.zid()) @@ -99,7 +103,7 @@ impl fmt::Display for Hello { } } -#[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] +#[derive(Default, Copy, Clone, Eq, Hash, PartialEq)] #[repr(transparent)] pub struct EntityGlobalId(EntityGlobalIdProto); @@ -113,6 +117,15 @@ impl EntityGlobalId { } } +impl fmt::Debug for EntityGlobalId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EntityGlobalId") + .field("zid", &self.zid()) + .field("eid", &self.eid()) + .finish() + } +} + impl From for EntityGlobalId { fn from(id: EntityGlobalIdProto) -> Self { Self(id) From 33269113de7acb34a39d9443f848aa6e37dac899 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 13:42:34 +0200 Subject: [PATCH 443/598] fix: fix z_scout example --- examples/examples/z_scout.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index 1d485991fd..e016a03420 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -25,7 +25,7 @@ async fn main() { let _ = tokio::time::timeout(std::time::Duration::from_secs(1), async { while let Ok(hello) = 
receiver.recv_async().await { - println!("{hello}"); + println!("{hello:?}"); } }) .await; From 7a2c358577013284936f5c7abc6bb1b408465f44 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 13:53:13 +0200 Subject: [PATCH 444/598] fix: fix hello display --- commons/zenoh-config/src/wrappers.rs | 6 ++++++ examples/examples/z_scout.rs | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index cb78812c86..5c5d797fac 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -103,6 +103,12 @@ impl fmt::Debug for Hello { } } +impl fmt::Display for Hello { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self, f) + } +} + #[derive(Default, Copy, Clone, Eq, Hash, PartialEq)] #[repr(transparent)] pub struct EntityGlobalId(EntityGlobalIdProto); diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index e016a03420..1d485991fd 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -25,7 +25,7 @@ async fn main() { let _ = tokio::time::timeout(std::time::Duration::from_secs(1), async { while let Ok(hello) = receiver.recv_async().await { - println!("{hello:?}"); + println!("{hello}"); } }) .await; From 99b876e4610687f7136fc3a24df7c4def7f14ad1 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 14:35:15 +0200 Subject: [PATCH 445/598] ci: add checks for zenoh crate alone (#1131) * ci: add checks for zenoh crate alone * fix: fix test * ci: add no-default-features check for zenoh * fix: fix test --- .github/workflows/ci.yml | 22 +++++++++++++---- ci/valgrind-check/Cargo.toml | 4 ++-- .../src/queryable_get/bin/z_queryable_get.rs | 4 ++-- zenoh/src/api/builders/publisher.rs | 2 ++ zenoh/src/api/publisher.rs | 22 +++++++++++------ zenoh/src/api/selector.rs | 1 + zenoh/src/api/session.rs | 10 ++++---- zenoh/src/net/routing/hat/client/pubsub.rs | 1 + 
.../net/routing/hat/linkstate_peer/pubsub.rs | 24 +++++++++---------- zenoh/src/net/routing/hat/mod.rs | 7 ++++-- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 1 + zenoh/src/net/routing/hat/router/pubsub.rs | 24 +++++++++---------- zenoh/tests/acl.rs | 2 +- zenoh/tests/events.rs | 4 +++- zenoh/tests/formatters.rs | 1 + zenoh/tests/liveliness.rs | 2 +- zenoh/tests/matching.rs | 3 ++- zenoh/tests/open_time.rs | 1 + zenoh/tests/qos.rs | 3 ++- zenoh/tests/routing.rs | 3 ++- zenoh/tests/session.rs | 11 +++++---- zenoh/tests/shm.rs | 2 +- zenoh/tests/unicity.rs | 2 +- 23 files changed, 95 insertions(+), 61 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1200218451..5f153a2d32 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,15 +51,27 @@ jobs: - name: Code format check run: cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" - - name: Clippy + - name: Clippy zenoh no-default-features + run: cargo +stable clippy -p zenoh --all-targets --no-default-features -- --deny warnings + + - name: Clippy zenoh + run: cargo +stable clippy -p zenoh --all-targets -- --deny warnings + + - name: Clippy zenoh unstable + run: cargo +stable clippy -p zenoh --all-targets --features unstable -- --deny warnings + + - name: Clippy zenoh internal + run: cargo +stable clippy -p zenoh --all-targets --features unstable,internal -- --deny warnings + + - name: Clippy zenoh shared-memory + run: cargo +stable clippy -p zenoh --all-targets --features unstable,shared-memory -- --deny warnings + + - name: Clippy workspace run: cargo +stable clippy --all-targets -- --deny warnings - - name: Clippy unstable targets + - name: Clippy workspace unstable run: cargo +stable clippy --all-targets --features unstable -- --deny warnings - - name: Clippy shared memory without unstable - run: cargo +stable clippy --all-targets --features shared-memory -- --deny warnings - - name: Clippy all 
features if: ${{ matrix.os == 'ubuntu-latest' || matrix.os == 'macOS-latest' }} run: cargo +stable clippy --all-targets --all-features -- --deny warnings diff --git a/ci/valgrind-check/Cargo.toml b/ci/valgrind-check/Cargo.toml index e334c74141..94ee27e7eb 100644 --- a/ci/valgrind-check/Cargo.toml +++ b/ci/valgrind-check/Cargo.toml @@ -22,10 +22,10 @@ categories = ["network-programming"] description = "Internal crate for zenoh." [dependencies] -tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } +tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } futures = "0.3.25" -zenoh = { path = "../../zenoh/", features = ["unstable"] } +zenoh = { path = "../../zenoh/" } zenoh-runtime = { path = "../../commons/zenoh-runtime/" } zenoh-util = { path = "../../commons/zenoh-util/", features = ["test"] } diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 06d1d79152..70945a4926 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -34,7 +34,7 @@ async fn main() { let queryable_key_expr = queryable_key_expr.clone(); zenoh_runtime::ZRuntime::Application.block_in_place(async move { query - .reply(queryable_key_expr, query.value().unwrap().payload().clone()) + .reply(queryable_key_expr, query.payload().unwrap().clone()) .await .unwrap(); }); @@ -51,7 +51,7 @@ async fn main() { println!("Sending Query '{get_selector}'..."); let replies = get_session .get(&get_selector) - .value(idx) + .payload(idx) .target(QueryTarget::All) .await .unwrap(); diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 88bae4469b..97b3c22fe4 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -293,6 +293,7 @@ impl<'a, 'b> 
PublisherBuilder<'a, 'b> { priority: self.priority, is_express: self.is_express, destination: self.destination, + #[cfg(feature = "unstable")] matching_listeners: Default::default(), undeclare_on_drop: true, }) @@ -346,6 +347,7 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { priority: self.priority, is_express: self.is_express, destination: self.destination, + #[cfg(feature = "unstable")] matching_listeners: Default::default(), undeclare_on_drop: true, }) diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 48a927cab3..8a4330676f 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -13,18 +13,14 @@ // use std::{ - collections::HashSet, convert::TryFrom, fmt, future::{IntoFuture, Ready}, pin::Pin, - sync::{Arc, Mutex}, task::{Context, Poll}, }; use futures::Sink; -#[zenoh_macros::unstable] -use zenoh_config::wrappers::EntityGlobalId; use zenoh_core::{zread, Resolvable, Resolve, Wait}; use zenoh_protocol::{ core::CongestionControl, @@ -32,10 +28,17 @@ use zenoh_protocol::{ zenoh::{Del, PushBody, Put}, }; use zenoh_result::{Error, ZResult}; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use { - crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, - crate::api::sample::SourceInfo, + crate::api::{ + handlers::{Callback, DefaultHandler, IntoHandler}, + sample::SourceInfo, + }, + std::{ + collections::HashSet, + sync::{Arc, Mutex}, + }, + zenoh_config::wrappers::EntityGlobalId, zenoh_protocol::core::EntityGlobalIdProto, }; @@ -137,6 +140,7 @@ pub struct Publisher<'a> { pub(crate) priority: Priority, pub(crate) is_express: bool, pub(crate) destination: Locality, + #[cfg(feature = "unstable")] pub(crate) matching_listeners: Arc>>, pub(crate) undeclare_on_drop: bool, } @@ -350,6 +354,7 @@ impl<'a> Publisher<'a> { Undeclarable::undeclare_inner(self, ()) } + #[cfg(feature = "unstable")] fn undeclare_matching_listeners(&self) -> ZResult<()> { let ids: Vec = zlock!(self.matching_listeners).drain().collect(); 
for id in ids { @@ -479,6 +484,7 @@ impl Wait for PublisherUndeclaration<'_> { fn wait(mut self) -> ::To { // set the flag first to avoid double panic if this function panic self.publisher.undeclare_on_drop = false; + #[cfg(feature = "unstable")] self.publisher.undeclare_matching_listeners()?; self.publisher .session @@ -498,6 +504,7 @@ impl IntoFuture for PublisherUndeclaration<'_> { impl Drop for Publisher<'_> { fn drop(&mut self) { if self.undeclare_on_drop { + #[cfg(feature = "unstable")] let _ = self.undeclare_matching_listeners(); let _ = self.session.undeclare_publisher_inner(self.id); } @@ -1102,6 +1109,7 @@ mod tests { use crate::api::{sample::SampleKind, session::SessionDeclarations}; + #[cfg(feature = "internal")] #[test] fn priority_from() { use std::convert::TryInto; diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 7477ea65e9..e22ac977dc 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -333,6 +333,7 @@ impl<'a> From> for Selector<'a> { } } +#[cfg(feature = "unstable")] #[test] fn selector_accessors() { use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index d95a9a8910..b8a73e8c23 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1665,13 +1665,11 @@ impl Session { tracing::trace!("get({}, {:?}, {:?})", selector, target, consolidation); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { - ConsolidationMode::Auto => { - if selector.parameters().contains_key(TIME_RANGE_KEY) { - ConsolidationMode::None - } else { - ConsolidationMode::Latest - } + #[cfg(feature = "unstable")] + ConsolidationMode::Auto if selector.parameters().contains_key(TIME_RANGE_KEY) => { + ConsolidationMode::None } + ConsolidationMode::Auto => ConsolidationMode::Latest, mode => mode, }; let qid = state.qid_counter.fetch_add(1, Ordering::SeqCst); diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs 
b/zenoh/src/net/routing/hat/client/pubsub.rs index a87a4e7f1e..e31c5244a7 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -527,6 +527,7 @@ impl HatPubSubTrait for HatCode { get_routes_entries() } + #[zenoh_macros::unstable] fn get_matching_subscriptions( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index b75cb26cc1..8860e04f34 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -34,19 +34,18 @@ use super::{ face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, }; -use crate::{ - key_expr::KeyExpr, - net::routing::{ - dispatcher::{ - face::FaceState, - pubsub::*, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, - }, - hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, +#[cfg(feature = "unstable")] +use crate::key_expr::KeyExpr; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -913,6 +912,7 @@ impl HatPubSubTrait for HatCode { get_routes_entries(tables) } + #[zenoh_macros::unstable] fn get_matching_subscriptions( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index b8aa28d5f4..6d557f44ff 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,7 +17,7 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use std::{any::Any, collections::HashMap, sync::Arc}; +use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; @@ -34,6 +34,8 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +#[cfg(feature = "unstable")] +use {crate::key_expr::KeyExpr, std::collections::HashMap}; use super::{ dispatcher::{ @@ -42,7 +44,7 @@ use super::{ }, router::RoutesIndexes, }; -use crate::{key_expr::KeyExpr, net::runtime::Runtime}; +use crate::net::runtime::Runtime; mod client; mod linkstate_peer; @@ -180,6 +182,7 @@ pub(crate) trait HatPubSubTrait { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes; + #[zenoh_macros::unstable] fn get_matching_subscriptions( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index ef092d286a..b4be235a29 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -652,6 +652,7 @@ impl HatPubSubTrait for HatCode { get_routes_entries() } + #[zenoh_macros::unstable] fn get_matching_subscriptions( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index dba4d58e85..dfb578ecf7 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -34,19 +34,18 @@ use super::{ face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, }; -use crate::{ - key_expr::KeyExpr, - net::routing::{ - dispatcher::{ - face::FaceState, - pubsub::*, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, - }, - hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, 
+#[cfg(feature = "unstable")] +use crate::key_expr::KeyExpr; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -1252,6 +1251,7 @@ impl HatPubSubTrait for HatCode { get_routes_entries(tables) } + #[zenoh_macros::unstable] fn get_matching_subscriptions( &self, tables: &Tables, diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 31294b1359..6086a048ee 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -22,10 +22,10 @@ mod test { use zenoh::{ config, config::{EndPoint, WhatAmI}, - internal::{zlock, ztimeout}, prelude::*, Config, Session, }; + use zenoh_core::{zlock, ztimeout}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 17819390aa..267b30442f 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -13,7 +13,8 @@ // use std::time::Duration; -use zenoh::{config, internal::ztimeout, prelude::*, query::Reply, sample::SampleKind, Session}; +use zenoh::{config, query::Reply, sample::SampleKind, Session}; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(10); @@ -39,6 +40,7 @@ async fn close_session(session: Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_events() { + use zenoh::prelude::SessionDeclarations; let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; let zid = session.zid(); let sub1 = diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index faa4839abd..a63fb10e8c 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +#![cfg(feature = "unstable")] use zenoh::key_expr::format::{kedefine, keformat}; #[test] diff 
--git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 6c666ca26d..00d181311e 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -16,10 +16,10 @@ use std::time::Duration; use zenoh::{ config, - internal::ztimeout, prelude::*, sample::{Sample, SampleKind}, }; +use zenoh_core::ztimeout; #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index db10241cc4..13a05a268e 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -15,7 +15,8 @@ use std::{str::FromStr, time::Duration}; use flume::RecvTimeoutError; -use zenoh::{config, config::Locator, internal::ztimeout, prelude::*, sample::Locality, Session}; +use zenoh::{config, config::Locator, prelude::*, sample::Locality, Session}; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs index dec41d1558..a6336e863a 100644 --- a/zenoh/tests/open_time.rs +++ b/zenoh/tests/open_time.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +#![allow(unused)] use std::{ future::IntoFuture, time::{Duration, Instant}, diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 18bc782852..7ba694d80c 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,7 +13,8 @@ // use std::time::Duration; -use zenoh::{core::Priority, internal::ztimeout, prelude::*, publisher::CongestionControl}; +use zenoh::{core::Priority, prelude::*, publisher::CongestionControl}; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index b72f2d560c..b632434c08 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -23,11 +23,12 @@ use std::{ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use 
zenoh::{ config::{ModeDependentValue, WhatAmI, WhatAmIMatcher}, - internal::{bail, ztimeout}, prelude::*, publisher::CongestionControl, Config, Result, Session, }; +use zenoh_core::ztimeout; +use zenoh_result::bail; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 4064cbc8ba..25adaf42e0 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -19,12 +19,13 @@ use std::{ time::Duration, }; -#[cfg(feature = "unstable")] +#[cfg(feature = "internal")] use zenoh::internal::runtime::{Runtime, RuntimeBuilder}; use zenoh::{ - config, internal::ztimeout, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, - sample::SampleKind, subscriber::Reliability, Session, + config, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, sample::SampleKind, + subscriber::Reliability, Session, }; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -261,7 +262,7 @@ async fn zenoh_session_multicast() { close_session(peer01, peer02).await; } -#[cfg(feature = "unstable")] +#[cfg(feature = "internal")] async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { // Open the sessions let mut config = config::peer(); @@ -287,7 +288,7 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) (r1, r2) } -#[cfg(feature = "unstable")] +#[cfg(feature = "internal")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_2sessions_1runtime_init() { let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await; diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index c2cbc4e89a..43205e8e47 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -22,7 +22,6 @@ use std::{ use zenoh::{ config, - internal::ztimeout, prelude::*, publisher::CongestionControl, shm::{ @@ -31,6 +30,7 @@ use zenoh::{ subscriber::Reliability, 
Session, }; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 35725a1abb..6ce01ff2bf 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -23,12 +23,12 @@ use tokio::runtime::Handle; use zenoh::{ config, config::{EndPoint, WhatAmI}, - internal::ztimeout, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, Session, }; +use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From f73d5199d0e38b233b94a022e8ce04ae227f77bf Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 15:19:58 +0200 Subject: [PATCH 446/598] Fix unicity tests --- zenoh/src/api/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3fbdc4e69e..99f965a497 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1674,7 +1674,7 @@ impl Session { let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { ConsolidationMode::Auto => { - if parameters.time_range().is_none() { + if parameters.time_range().is_some() { ConsolidationMode::None } else { ConsolidationMode::Latest From 2bfed1a822e869b88070c73a1bdf99d55ba9f702 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 15:31:53 +0200 Subject: [PATCH 447/598] Use Parameters::empty() --- zenoh/src/api/liveliness.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 00498b7ac3..91f5d4b227 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -757,7 +757,7 @@ where self.session .query( &self.key_expr?, - &Parameters::default(), + &Parameters::empty(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), QueryTarget::DEFAULT, QueryConsolidation::DEFAULT, From 
f3d033842e62f4b67cdaf72b91bfbe71cc089b5d Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 15:39:19 +0200 Subject: [PATCH 448/598] Fix --no-default-features --- zenoh/src/api/query.rs | 7 +++++-- zenoh/src/api/selector.rs | 2 -- zenoh/src/api/session.rs | 12 +++++------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index dc0f00300d..d3d4b84bfd 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -24,7 +24,10 @@ use zenoh_protocol::core::{CongestionControl, Parameters, ZenohIdProto}; use zenoh_result::ZResult; #[zenoh_macros::unstable] -use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; +use super::{ + builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo, + selector::ZenohParameters, +}; use super::{ builders::sample::{EncodingBuilderTrait, QoSBuilderTrait}, bytes::ZBytes, @@ -33,7 +36,7 @@ use super::{ key_expr::KeyExpr, publisher::Priority, sample::{Locality, QoSBuilder, Sample}, - selector::{Selector, ZenohParameters}, + selector::Selector, session::Session, value::Value, }; diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 3dbb136d83..1469544770 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -19,9 +19,7 @@ use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Parameters, }; -#[cfg(feature = "unstable")] use zenoh_result::ZResult; -#[cfg(feature = "unstable")] use zenoh_util::time_range::TimeRange; use super::{key_expr::KeyExpr, queryable::Query}; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index bef1ef4086..e6c9e79e73 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -84,18 +84,16 @@ use super::{ }; #[cfg(feature = "unstable")] use super::{ + api::selector::ZenohParameters, liveliness::{Liveliness, LivelinessTokenState}, publisher::Publisher, publisher::{MatchingListenerState, MatchingStatus}, 
sample::SourceInfo, }; -use crate::{ - api::selector::ZenohParameters, - net::{ - primitives::Primitives, - routing::dispatcher::face::Face, - runtime::{Runtime, RuntimeBuilder}, - }, +use crate::net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, }; zconfigurable! { From 3a0bcf679c42f2d12b9e09de01c27e9c55582250 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 15:45:22 +0200 Subject: [PATCH 449/598] Fix --no-default-features --- zenoh/src/api/selector.rs | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 1469544770..bc022e04d8 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -15,14 +15,13 @@ //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries use std::{borrow::Cow, convert::TryFrom, str::FromStr}; +use super::{key_expr::KeyExpr, queryable::Query}; use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Parameters, }; -use zenoh_result::ZResult; -use zenoh_util::time_range::TimeRange; - -use super::{key_expr::KeyExpr, queryable::Query}; +#[cfg(feature = "unstable")] +use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange}; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters @@ -141,16 +140,7 @@ pub trait ZenohParameters { fn reply_key_expr_any(&self) -> bool; } -#[cfg(not(feature = "unstable"))] -pub(crate) trait ZenohParameters { - const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke"; - const TIME_RANGE_KEY: &'static str = "_time"; - fn set_time_range>>(&mut self, time_range: T); - fn set_reply_key_expr_any(&mut self); - fn time_range(&self) -> Option>; - fn reply_key_expr_any(&self) -> bool; -} - +#[cfg(feature = "unstable")] impl ZenohParameters for Parameters<'_> { /// Sets the 
time range targeted by the selector parameters. fn set_time_range>>(&mut self, time_range: T) { From 112399505cdedba55778a836a622852f932aaeea Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 15:47:49 +0200 Subject: [PATCH 450/598] Fix cargo fmt --- zenoh/src/api/selector.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index bc022e04d8..85ac4787a0 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -15,7 +15,6 @@ //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries use std::{borrow::Cow, convert::TryFrom, str::FromStr}; -use super::{key_expr::KeyExpr, queryable::Query}; use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Parameters, @@ -23,6 +22,8 @@ use zenoh_protocol::core::{ #[cfg(feature = "unstable")] use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange}; +use super::{key_expr::KeyExpr, queryable::Query}; + /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters /// with a few intendend uses: From ad22c81a2f51634ff93889d068f95adf5abb25e1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 13 Jun 2024 15:49:32 +0200 Subject: [PATCH 451/598] Fix wrong use --- zenoh/src/api/session.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index e6c9e79e73..d6f4b25de6 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -84,12 +84,13 @@ use super::{ }; #[cfg(feature = "unstable")] use super::{ - api::selector::ZenohParameters, liveliness::{Liveliness, LivelinessTokenState}, publisher::Publisher, publisher::{MatchingListenerState, MatchingStatus}, sample::SourceInfo, }; +#[cfg(feature = "unstable")] +use crate::api::selector::ZenohParameters; use crate::net::{ 
primitives::Primitives, routing::dispatcher::face::Face, From 7f4f4aa0f17ce61bf05eda89a9fbfdc239f27db2 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 16:26:30 +0200 Subject: [PATCH 452/598] fix: fix ZenohIdProto leaking in API (#1133) * fix: fix ZenohIdProto leaking in API * fix: add forgotten import --- commons/zenoh-config/src/connection_retry.rs | 2 +- commons/zenoh-config/src/lib.rs | 2 +- commons/zenoh-config/src/mode_dependent.rs | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index e5f88a05f3..77db48e31c 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use zenoh_core::zparse_default; -use zenoh_protocol::core::WhatAmI; +use zenoh_protocol::core::{EndPoint, WhatAmI}; use crate::{ defaults::{ diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 150487791c..b7ebaa7396 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use serde::{Deserialize, Serialize}; use serde_json::{Map, Value}; use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; -use wrappers::ZenohId; +pub use wrappers::ZenohId; use zenoh_core::zlock; pub use zenoh_protocol::core::{ whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 20bcb3481b..7c331c8318 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -18,9 +18,7 @@ use serde::{ de::{self, MapAccess, Visitor}, Deserialize, Serialize, }; -pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohIdProto, -}; +use 
zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor}; pub trait ModeDependent { fn router(&self) -> Option<&T>; From 550a040056af79d36425dbedc152038a82a4f443 Mon Sep 17 00:00:00 2001 From: eclipse-zenoh-bot Date: Thu, 13 Jun 2024 15:02:44 +0000 Subject: [PATCH 453/598] chore: Sync Rust toolchain --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b7eadd649b..4dd8e5c567 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.72.0" \ No newline at end of file +channel = "1.75.0" \ No newline at end of file From 8d9d187c696d0567a55b3160558c4aa93827ee5a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 13 Jun 2024 17:45:01 +0200 Subject: [PATCH 454/598] replier_id() returns Option --- zenoh/src/api/query.rs | 7 ++++--- zenoh/src/api/session.rs | 10 +++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d3d4b84bfd..c294839f90 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -18,6 +18,7 @@ use std::{ time::Duration, }; +use zenoh_config::ZenohId; use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_protocol::core::{CongestionControl, Parameters, ZenohIdProto}; @@ -117,7 +118,7 @@ impl From for ReplyError { #[derive(Clone, Debug)] pub struct Reply { pub(crate) result: Result, - pub(crate) replier_id: ZenohIdProto, + pub(crate) replier_id: Option, } impl Reply { @@ -137,8 +138,8 @@ impl Reply { } /// Gets the id of the zenoh instance that answered this Reply. 
- pub fn replier_id(&self) -> ZenohIdProto { - self.replier_id + pub fn replier_id(&self) -> Option { + self.replier_id.map(Into::into) } } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index d6f4b25de6..dc311db1ae 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1702,7 +1702,7 @@ impl Session { } (query.callback)(Reply { result: Err(Value::from("Timeout").into()), - replier_id: zid.into(), + replier_id: Some(zid.into()), }); } } @@ -2194,12 +2194,8 @@ impl Primitives for Session { payload: e.payload.into(), encoding: e.encoding.into(), }; - let replier_id = match e.ext_sinfo { - Some(info) => info.id.zid, - None => zenoh_protocol::core::ZenohIdProto::rand(), - }; let new_reply = Reply { - replier_id, + replier_id: e.ext_sinfo.map(|info| info.id.zid), result: Err(value.into()), }; callback(new_reply); @@ -2308,7 +2304,7 @@ impl Primitives for Session { let sample = info.into_sample(key_expr.into_owned(), payload, attachment); let new_reply = Reply { result: Ok(sample), - replier_id: zenoh_protocol::core::ZenohIdProto::rand(), // TODO + replier_id: None, }; let callback = match query.reception_mode { From 90bba9379e4b844a5b4d387ccebab5483b81a7b4 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 18:12:23 +0200 Subject: [PATCH 455/598] fix: fix hello debug impl (#1137) --- commons/zenoh-config/src/wrappers.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index 5c5d797fac..3a45e20896 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -95,17 +95,17 @@ impl From for Hello { impl fmt::Debug for Hello { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Hello") - .field("zid", &self.zid()) - .field("whatami", &self.whatami()) - .field("locators", &self.locators()) - .finish() + fmt::Debug::fmt(&self.0, f) } } impl fmt::Display for Hello { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, f) + f.debug_struct("Hello") + .field("zid", &self.zid()) + .field("whatami", &self.whatami()) + .field("locators", &self.locators()) + .finish() } } From 02fc4a8a2f1e33edaac7ff598e7a623524231e88 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 13 Jun 2024 18:24:16 +0200 Subject: [PATCH 456/598] fix: fix clippy warnings for 1.79 (#1139) * fix: fix clippy warnings for 1.79 * fix: fix shm --- commons/zenoh-buffers/src/slice.rs | 6 ++-- commons/zenoh-config/src/lib.rs | 6 +++- .../src/keyexpr_tree/arc_tree.rs | 28 +++++++++++++++---- .../src/keyexpr_tree/box_tree.rs | 2 +- .../src/keyexpr_tree/traits/mod.rs | 6 ++-- commons/zenoh-shm/src/api/buffer/zshm.rs | 4 +-- commons/zenoh-shm/src/api/buffer/zshmmut.rs | 2 +- zenoh/src/api/bytes.rs | 27 ++++++++---------- 8 files changed, 50 insertions(+), 31 deletions(-) diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index f26e37a2aa..658827b6c4 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -77,7 +77,7 @@ impl Writer for &mut [u8] { // SAFETY: this doesn't compile with simple assignment because the compiler // doesn't believe that the subslice has the same lifetime as the original slice, // so we transmute to assure it that it does. - *self = unsafe { mem::transmute(lhs) }; + *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; // SAFETY: this operation is safe since we check if len is non-zero. Ok(unsafe { NonZeroUsize::new_unchecked(len) }) @@ -99,7 +99,7 @@ impl Writer for &mut [u8] { // SAFETY: this doesn't compile with simple assignment because the compiler // doesn't believe that the subslice has the same lifetime as the original slice, // so we transmute to assure it that it does. 
- *self = unsafe { mem::transmute(lhs) }; + *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; Ok(()) } @@ -123,7 +123,7 @@ impl Writer for &mut [u8] { // SAFETY: this doesn't compile with simple assignment because the compiler // doesn't believe that the subslice has the same lifetime as the original slice, // so we transmute to assure it that it does. - *self = unsafe { mem::transmute(s) }; + *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(s) }; NonZeroUsize::new(len).ok_or(DidntWrite) } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b7ebaa7396..6b52e250b2 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -1089,7 +1089,11 @@ impl PluginsConfig { for next in split { match remove_from { Value::Object(o) => match o.get_mut(current) { - Some(v) => unsafe { remove_from = std::mem::transmute(v) }, + Some(v) => { + remove_from = unsafe { + std::mem::transmute::<&mut serde_json::Value, &mut serde_json::Value>(v) + } + } None => bail!("{:?} has no {} property", o, current), }, Value::Array(a) => { diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index ef29ccc0f8..5e2deb206c 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -160,8 +160,11 @@ where } // tags{ketree.arc.node.mut} fn node_mut(&'a self, token: &'a mut Token, at: &keyexpr) -> Option { - self.node(unsafe { core::mem::transmute(&*token) }, at) - .map(|(node, _)| (node, token)) + self.node( + unsafe { core::mem::transmute::<&Token, &Token>(&*token) }, + at, + ) + .map(|(node, _)| (node, token)) } // tags{ketree.arc.node.or_create} fn node_or_create(&'a self, token: &'a mut Token, at: &keyexpr) -> Self::NodeMut { @@ -237,7 +240,9 @@ where fn tree_iter_mut(&'a self, token: &'a mut Token) -> Self::TreeIterMut { let inner = ketree_borrow(&self.inner, token); TokenPacker { - iter: 
TreeIter::new(unsafe { core::mem::transmute(&inner.children) }), + iter: TreeIter::new(unsafe { + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children) + }), token, } } @@ -289,7 +294,12 @@ where let inner = ketree_borrow(&self.inner, token); if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { - iter: Intersection::new(unsafe { core::mem::transmute(&inner.children) }, key), + iter: Intersection::new( + unsafe { + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children) + }, + key, + ), token, }) } else { @@ -341,7 +351,10 @@ where if inner.wildness.get() || key.is_wild_impl() { unsafe { IterOrOption::Iter(TokenPacker { - iter: Inclusion::new(core::mem::transmute(&inner.children), key), + iter: Inclusion::new( + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children), + key, + ), token, }) } @@ -394,7 +407,10 @@ where if inner.wildness.get() || key.is_wild_impl() { unsafe { IterOrOption::Iter(TokenPacker { - iter: Includer::new(core::mem::transmute(&inner.children), key), + iter: Includer::new( + core::mem::transmute::<&Children::Assoc, &Children::Assoc>(&inner.children), + key, + ), token, }) } diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index c72047ee03..69607e9608 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -158,7 +158,7 @@ where if !node.children.is_empty() { node.weight.take() } else { - let chunk = unsafe { core::mem::transmute::<_, &keyexpr>(node.chunk()) }; + let chunk = unsafe { core::mem::transmute::<&keyexpr, &keyexpr>(node.chunk()) }; match node.parent { None => &mut self.children, Some(parent) => unsafe { &mut (*parent.as_ptr()).children }, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index 03a97f5063..69fe6efde3 100644 --- 
a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -55,8 +55,10 @@ pub trait IKeyExprTree<'a, Weight> { Self::TreeIterItem: AsNode>, { self.tree_iter().filter_map(|node| { - unsafe { core::mem::transmute::<_, Option<&Weight>>(node.as_node().weight()) } - .map(|w| (node.as_node().keyexpr(), w)) + unsafe { + core::mem::transmute::, Option<&Weight>>(node.as_node().weight()) + } + .map(|w| (node.as_node().keyexpr(), w)) }) } diff --git a/commons/zenoh-shm/src/api/buffer/zshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs index 23b902ac4c..8a028277a7 100644 --- a/commons/zenoh-shm/src/api/buffer/zshm.rs +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -97,7 +97,7 @@ impl TryFrom<&mut ZShm> for &mut zshmmut { true => { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to ShmBufInner type, so it is safe to transmute them in any direction - Ok(unsafe { core::mem::transmute(value) }) + Ok(unsafe { core::mem::transmute::<&mut ZShm, &mut zshmmut>(value) }) } false => Err(()), } @@ -163,7 +163,7 @@ impl TryFrom<&mut zshm> for &mut zshmmut { true => { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to ShmBufInner type, so it is safe to transmute them in any direction - Ok(unsafe { core::mem::transmute(value) }) + Ok(unsafe { core::mem::transmute::<&mut zshm, &mut zshmmut>(value) }) } false => Err(()), } diff --git a/commons/zenoh-shm/src/api/buffer/zshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs index 39a01dff74..a116a7f421 100644 --- a/commons/zenoh-shm/src/api/buffer/zshmmut.rs +++ b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -181,7 +181,7 @@ impl TryFrom<&mut ShmBufInner> for &mut zshmmut { match value.is_unique() && value.is_valid() { // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to ShmBufInner type, so it is safe to transmute them in any direction - true => Ok(unsafe { core::mem::transmute(value) }), + true => Ok(unsafe { 
core::mem::transmute::<&mut ShmBufInner, &mut zshmmut>(value) }), false => Err(()), } } diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index cea9720c1f..1496492379 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -911,11 +911,10 @@ impl TryFrom for Cow<'static, str> { type Error = Utf8Error; fn try_from(v: ZBytes) -> Result { - let v: Cow<'static, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) + Ok(match Cow::<[u8]>::from(v) { + Cow::Borrowed(s) => core::str::from_utf8(s)?.into(), + Cow::Owned(s) => String::from_utf8(s).map_err(|err| err.utf8_error())?.into(), + }) } } @@ -923,11 +922,10 @@ impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { type Error = Utf8Error; fn try_from(v: &'a ZBytes) -> Result { - let v: Cow<'a, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) + Ok(match Cow::<[u8]>::from(v) { + Cow::Borrowed(s) => core::str::from_utf8(s)?.into(), + Cow::Owned(s) => String::from_utf8(s).map_err(|err| err.utf8_error())?.into(), + }) } } @@ -935,11 +933,10 @@ impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { type Error = Utf8Error; fn try_from(v: &'a mut ZBytes) -> Result { - let v: Cow<'a, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. 
- Ok(unsafe { core::mem::transmute(v) }) + Ok(match Cow::<[u8]>::from(v) { + Cow::Borrowed(s) => core::str::from_utf8(s)?.into(), + Cow::Owned(s) => String::from_utf8(s).map_err(|err| err.utf8_error())?.into(), + }) } } From d4f68a29984283e244d793f2b04a2c2d16652c23 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 13 Jun 2024 19:49:42 +0300 Subject: [PATCH 457/598] fix EntityGlobalIdProto leaks --- commons/zenoh-config/src/wrappers.rs | 6 ++++++ zenoh/src/api/sample.rs | 9 +++++---- zenoh/src/api/session.rs | 8 ++++---- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index 3a45e20896..92ed7faf44 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -137,3 +137,9 @@ impl From for EntityGlobalId { Self(id) } } + +impl From for EntityGlobalIdProto { + fn from(value: EntityGlobalId) -> Self { + value.0 + } +} diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index a65e42048c..41317b8b43 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -17,8 +17,9 @@ use std::{convert::TryFrom, fmt}; #[cfg(feature = "unstable")] use serde::Serialize; +use zenoh_config::wrappers::EntityGlobalId; use zenoh_protocol::{ - core::{CongestionControl, EntityGlobalIdProto, Timestamp}, + core::{CongestionControl, Timestamp}, network::declare::ext::QoSType, }; @@ -52,7 +53,7 @@ pub(crate) struct DataInfo { pub kind: SampleKind, pub encoding: Option, pub timestamp: Option, - pub source_id: Option, + pub source_id: Option, pub source_sn: Option, pub qos: QoS, } @@ -137,7 +138,7 @@ impl DataInfoIntoSample for Option { #[derive(Debug, Clone)] pub struct SourceInfo { /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. - pub source_id: Option, + pub source_id: Option, /// The sequence number of the [`Sample`] from the source. 
pub source_sn: Option, } @@ -175,7 +176,7 @@ impl From for Option Date: Fri, 14 Jun 2024 08:41:19 +0200 Subject: [PATCH 458/598] feat: add `Selector::into_owned` --- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/selector.rs | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index b912d358bb..0cb82683e9 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -94,7 +94,7 @@ impl Query { /// This Query's selector parameters. #[inline(always)] - pub fn parameters(&self) -> &Parameters { + pub fn parameters(&self) -> &Parameters<'static> { &self.inner.parameters } diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 85ac4787a0..fe98cce6a6 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -79,13 +79,21 @@ impl<'a> Selector<'a> { } } /// Build a new selector holding references to keyexpr and parameters - /// Useful for printing pair of keyexpr and parameters in url-like format + /// Useful for printing pairs of keyexpr and parameters in url-like format pub fn borrowed(key_expr: &'a KeyExpr<'a>, parameters: &'a Parameters<'a>) -> Self { Self { key_expr: Cow::Borrowed(key_expr), parameters: Cow::Borrowed(parameters), } } + + /// Convert this selector into an owned one. 
+ pub fn into_owned(self) -> Selector<'static> { + Selector::owned( + self.key_expr.into_owned().into_owned(), + self.parameters.into_owned().into_owned(), + ) + } } impl<'a, K, P> From<(K, P)> for Selector<'a> From 22936495203d18f13c9353b2010fd7794583d599 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 14 Jun 2024 12:36:22 +0300 Subject: [PATCH 459/598] More API leaks fixed (#1143) --- commons/zenoh-config/src/wrappers.rs | 4 ++-- zenoh/src/api/query.rs | 5 +++-- zenoh/src/api/queryable.rs | 4 +++- zenoh/src/api/subscriber.rs | 7 ++++--- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index 92ed7faf44..47dc27aea7 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -77,8 +77,8 @@ impl Hello { } /// Get the zenoh id of this Hello message. - pub fn zid(&self) -> ZenohIdProto { - self.0.zid + pub fn zid(&self) -> ZenohId { + self.0.zid.into() } /// Get the whatami of this Hello message. diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d3d4b84bfd..fc5bb2e5d7 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -18,6 +18,7 @@ use std::{ time::Duration, }; +use zenoh_config::ZenohId; use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_protocol::core::{CongestionControl, Parameters, ZenohIdProto}; @@ -137,8 +138,8 @@ impl Reply { } /// Gets the id of the zenoh instance that answered this Reply. 
- pub fn replier_id(&self) -> ZenohIdProto { - self.replier_id + pub fn replier_id(&self) -> ZenohId { + self.replier_id.into() } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index b912d358bb..8a2d7011e5 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -29,6 +29,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { super::{query::ReplyKeyExpr, sample::SourceInfo}, + zenoh_config::wrappers::EntityGlobalId, zenoh_protocol::core::EntityGlobalIdProto, }; @@ -823,11 +824,12 @@ impl<'a, Handler> Queryable<'a, Handler> { /// # } /// ``` #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalIdProto { + pub fn id(&self) -> EntityGlobalId { EntityGlobalIdProto { zid: self.queryable.session.zid().into(), eid: self.queryable.state.id, } + .into() } /// Returns a reference to this queryable's handler. diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index f7d5268772..c77dbc8791 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -20,10 +20,10 @@ use std::{ }; use zenoh_core::{Resolvable, Wait}; -#[cfg(feature = "unstable")] -use zenoh_protocol::core::EntityGlobalIdProto; use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; use zenoh_result::ZResult; +#[cfg(feature = "unstable")] +use {zenoh_config::wrappers::EntityGlobalId, zenoh_protocol::core::EntityGlobalIdProto}; use super::{ handlers::{locked, Callback, DefaultHandler, IntoHandler}, @@ -458,11 +458,12 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// # } /// ``` #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalIdProto { + pub fn id(&self) -> EntityGlobalId { EntityGlobalIdProto { zid: self.subscriber.session.zid().into(), eid: self.subscriber.state.id, } + .into() } /// Returns the [`KeyExpr`] this Subscriber subscribes to. 
From b9f65063ea397d0e847ce96f262dccce4b406d55 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 14 Jun 2024 12:59:42 +0300 Subject: [PATCH 460/598] Update lib.rs (#1144) --- zenoh/src/lib.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a522375cc1..65a2efda8d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -398,11 +398,6 @@ pub mod internal { pub use crate::api::value::Value; } -#[cfg(all(feature = "shared-memory", not(feature = "unstable")))] -compile_error!( - "The shared-memory support is unstable. The `unstable` feature must be enabled to use `shared-memory`." -); - #[zenoh_macros::unstable] #[cfg(feature = "shared-memory")] pub mod shm { From e2efb1a803814bf8b948b065046970a5db55f048 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 14 Jun 2024 15:11:09 +0200 Subject: [PATCH 461/598] default added to ReplyError --- zenoh/src/api/query.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index c294839f90..48e3674c85 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -84,7 +84,7 @@ impl Default for QueryConsolidation { } /// Error returned by a [`get`](Session::get). 
-#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ReplyError { pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, From 5a81586b0c0301157060e2bbc391849d3ea45168 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 16 Jun 2024 20:30:32 +0200 Subject: [PATCH 462/598] into keyexpr for ZenohId --- commons/zenoh-config/src/wrappers.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index 47dc27aea7..033e65cc35 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -20,7 +20,7 @@ use std::str::FromStr; use serde::{Deserialize, Serialize}; use zenoh_protocol::{ - core::{EntityGlobalIdProto, EntityId, Locator, WhatAmI, ZenohIdProto}, + core::{key_expr::OwnedKeyExpr, EntityGlobalIdProto, EntityId, Locator, WhatAmI, ZenohIdProto}, scouting::HelloProto, }; @@ -58,6 +58,18 @@ impl From for uhlc::ID { } } +impl From for OwnedKeyExpr { + fn from(zid: ZenohId) -> Self { + zid.0.into() + } +} + +impl From<&ZenohId> for OwnedKeyExpr { + fn from(zid: &ZenohId) -> Self { + (*zid).into() + } +} + impl FromStr for ZenohId { type Err = zenoh_result::Error; From a493333ca33fc3f66cee2b2fe0d9c3a992d0cfc6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 16 Jun 2024 21:46:53 +0200 Subject: [PATCH 463/598] into_keyexpr added to zenphId --- Cargo.lock | 1 + commons/zenoh-config/Cargo.toml | 5 +++-- commons/zenoh-config/src/wrappers.rs | 8 ++++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2842003f2d..a6f286aec3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5189,6 +5189,7 @@ dependencies = [ "uhlc", "validated_struct", "zenoh-core", + "zenoh-macros", "zenoh-protocol", "zenoh-result", "zenoh-util", diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index 1b860fa7e0..b31b6472a0 100644 --- 
a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -24,7 +24,7 @@ categories = { workspace = true } description = "Internal crate for zenoh." [dependencies] -tracing = {workspace = true} +tracing = { workspace = true } flume = { workspace = true } json5 = { workspace = true } num_cpus = { workspace = true } @@ -36,5 +36,6 @@ zenoh-core = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } +zenoh-macros = { workspace = true } secrecy = { workspace = true } -uhlc = { workspace = true } \ No newline at end of file +uhlc = { workspace = true } diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index 033e65cc35..73b66824b8 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -29,6 +29,14 @@ use zenoh_protocol::{ #[repr(transparent)] pub struct ZenohId(ZenohIdProto); +impl ZenohId { + /// Used by plugins for crating adminspace path + #[zenoh_macros::unstable] + pub fn into_keyexpr(self) -> OwnedKeyExpr { + self.into() + } +} + impl fmt::Debug for ZenohId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) From 3fd8e628c27cf51a4722b62e7c485cd8e0353280 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 16 Jun 2024 22:00:57 +0200 Subject: [PATCH 464/598] zenoh-config unstable added --- commons/zenoh-config/Cargo.toml | 3 +++ zenoh/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index b31b6472a0..6265eb6bc9 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -23,6 +23,9 @@ license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh." 
+[features] +unstable = [] + [dependencies] tracing = { workspace = true } flume = { workspace = true } diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 155a906852..c7da78b8a3 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -63,7 +63,7 @@ transport_udp = ["zenoh-transport/transport_udp"] transport_unixsock-stream = ["zenoh-transport/transport_unixsock-stream"] transport_ws = ["zenoh-transport/transport_ws"] transport_vsock = ["zenoh-transport/transport_vsock"] -unstable = ["zenoh-keyexpr/unstable"] +unstable = ["zenoh-keyexpr/unstable", "zenoh-config/unstable"] [dependencies] tokio = { workspace = true, features = ["rt", "macros", "time"] } From 4ad5b43271dfbc7c58e79eb2402aefa54e8bf4b6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 17 Jun 2024 00:36:40 +0200 Subject: [PATCH 465/598] export init_log_from_env_or --- zenoh/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 65a2efda8d..f62867c9b2 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -119,7 +119,7 @@ pub use { scouting::scout, session::{open, Session}, }, - zenoh_util::try_init_log_from_env, + zenoh_util::{init_log_from_env_or, try_init_log_from_env}, }; pub mod prelude; From 0027fa663b6af03ee277dcc6f97813fea2e9d0b8 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 17 Jun 2024 11:34:13 +0200 Subject: [PATCH 466/598] Fix interest protocol when client connected to peer (#1130) * Fix invalid interest aggregate option handling * Fix interest propagation when pub client connecting to peer * Code reorg * Peers wait for DeclareFinal from routers before propagating to clients * Fix InterestFinal propagation * Only send back DeclareFinal if interest is current * Address review comments --- zenoh/src/net/routing/dispatcher/face.rs | 73 ++---- zenoh/src/net/routing/dispatcher/interests.rs | 227 ++++++++++++++++ zenoh/src/net/routing/dispatcher/mod.rs | 1 + zenoh/src/net/routing/dispatcher/pubsub.rs | 81 ------ 
zenoh/src/net/routing/dispatcher/queries.rs | 82 ------ zenoh/src/net/routing/dispatcher/tables.rs | 8 +- zenoh/src/net/routing/hat/client/interests.rs | 173 ++++++++++++ zenoh/src/net/routing/hat/client/mod.rs | 12 +- zenoh/src/net/routing/hat/client/pubsub.rs | 136 +--------- zenoh/src/net/routing/hat/client/queries.rs | 30 +-- .../routing/hat/linkstate_peer/interests.rs | 86 ++++++ .../src/net/routing/hat/linkstate_peer/mod.rs | 12 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 177 ++++++------- .../net/routing/hat/linkstate_peer/queries.rs | 173 ++++++------ zenoh/src/net/routing/hat/mod.rs | 42 +-- .../src/net/routing/hat/p2p_peer/interests.rs | 224 ++++++++++++++++ zenoh/src/net/routing/hat/p2p_peer/mod.rs | 13 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 246 +++++++++--------- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 192 +++++++------- zenoh/src/net/routing/hat/router/interests.rs | 96 +++++++ zenoh/src/net/routing/hat/router/mod.rs | 12 +- zenoh/src/net/routing/hat/router/pubsub.rs | 214 +++++++-------- zenoh/src/net/routing/hat/router/queries.rs | 221 +++++++--------- 23 files changed, 1444 insertions(+), 1087 deletions(-) create mode 100644 zenoh/src/net/routing/dispatcher/interests.rs create mode 100644 zenoh/src/net/routing/hat/client/interests.rs create mode 100644 zenoh/src/net/routing/hat/linkstate_peer/interests.rs create mode 100644 zenoh/src/net/routing/hat/p2p_peer/interests.rs create mode 100644 zenoh/src/net/routing/hat/router/interests.rs diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 7c92d5f709..88f2abe9c1 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -22,10 +22,8 @@ use tokio_util::sync::CancellationToken; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohIdProto}, network::{ - declare::ext, interest::{InterestId, InterestMode, InterestOptions}, - Declare, DeclareBody, DeclareFinal, Mapping, Push, Request, 
RequestId, Response, - ResponseFinal, + Mapping, Push, Request, RequestId, Response, ResponseFinal, }, zenoh::RequestBody, }; @@ -35,15 +33,17 @@ use zenoh_transport::multicast::TransportMulticast; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; -use super::{super::router::*, resource::*, tables, tables::TablesLock}; +use super::{ + super::router::*, + interests::{declare_final, declare_interest, undeclare_interest, CurrentInterest}, + resource::*, + tables::{self, TablesLock}, +}; use crate::{ api::key_expr::KeyExpr, net::{ primitives::{McastMux, Mux, Primitives}, - routing::{ - interceptor::{InterceptorTrait, InterceptorsChain}, - RoutingContext, - }, + routing::interceptor::{InterceptorTrait, InterceptorsChain}, }, }; @@ -62,6 +62,8 @@ pub struct FaceState { pub(crate) primitives: Arc, pub(crate) local_interests: HashMap, pub(crate) remote_key_interests: HashMap>>, + pub(crate) pending_current_interests: + HashMap, CancellationToken)>, pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, pub(crate) next_qid: RequestId, @@ -93,6 +95,7 @@ impl FaceState { primitives, local_interests: HashMap::new(), remote_key_interests: HashMap::new(), + pending_current_interests: HashMap::new(), local_mappings: HashMap::new(), remote_mappings: HashMap::new(), next_qid: 0, @@ -212,57 +215,17 @@ impl Primitives for Face { fn send_interest(&self, msg: zenoh_protocol::network::Interest) { let ctrl_lock = zlock!(self.tables.ctrl_lock); if msg.mode != InterestMode::Final { - if msg.options.keyexprs() && msg.mode != InterestMode::Current { - register_expr_interest( - &self.tables, - &mut self.state.clone(), - msg.id, - msg.wire_expr.as_ref(), - ); - } - if msg.options.subscribers() { - declare_sub_interest( - ctrl_lock.as_ref(), - &self.tables, - &mut self.state.clone(), - msg.id, - msg.wire_expr.as_ref(), - msg.mode, - msg.options.aggregate(), - ); - } - if msg.options.queryables() { - declare_qabl_interest( - ctrl_lock.as_ref(), - 
&self.tables, - &mut self.state.clone(), - msg.id, - msg.wire_expr.as_ref(), - msg.mode, - msg.options.aggregate(), - ); - } - if msg.mode != InterestMode::Future { - self.state.primitives.send_declare(RoutingContext::new_out( - Declare { - interest_id: Some(msg.id), - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareFinal(DeclareFinal), - }, - self.clone(), - )); - } - } else { - unregister_expr_interest(&self.tables, &mut self.state.clone(), msg.id); - undeclare_sub_interest( + declare_interest( ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), msg.id, + msg.wire_expr.as_ref(), + msg.mode, + msg.options, ); - undeclare_qabl_interest( + } else { + undeclare_interest( ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), @@ -336,6 +299,8 @@ impl Primitives for Face { .entry(id) .and_modify(|interest| interest.finalized = true); + declare_final(&mut self.state.clone(), id); + // recompute routes // TODO: disable routes and recompute them in parallel to avoid holding // tables write lock for a long time. diff --git a/zenoh/src/net/routing/dispatcher/interests.rs b/zenoh/src/net/routing/dispatcher/interests.rs new file mode 100644 index 0000000000..ab3764d14f --- /dev/null +++ b/zenoh/src/net/routing/dispatcher/interests.rs @@ -0,0 +1,227 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{Arc, Weak}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio_util::sync::CancellationToken; +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::{ + core::WireExpr, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, + }, +}; +use zenoh_sync::get_mut_unchecked; +use zenoh_util::Timed; + +use super::{ + face::FaceState, + tables::{register_expr_interest, TablesLock}, +}; +use crate::net::routing::{ + hat::HatTrait, + router::{unregister_expr_interest, Resource}, + RoutingContext, +}; + +static INTEREST_TIMEOUT_MS: u64 = 10000; + +pub(crate) struct CurrentInterest { + pub(crate) src_face: Arc, + pub(crate) src_interest_id: InterestId, +} + +pub(crate) fn declare_final(face: &mut Arc, id: InterestId) { + if let Some(interest) = get_mut_unchecked(face) + .pending_current_interests + .remove(&id) + { + finalize_pending_interest(interest); + } +} + +pub(crate) fn finalize_pending_interests(_tables_ref: &TablesLock, face: &mut Arc) { + for (_, interest) in get_mut_unchecked(face).pending_current_interests.drain() { + finalize_pending_interest(interest); + } +} + +pub(crate) fn finalize_pending_interest(interest: (Arc, CancellationToken)) { + let (interest, cancellation_token) = interest; + cancellation_token.cancel(); + if let Some(interest) = Arc::into_inner(interest) { + tracing::debug!( + "Propagate DeclareFinal {}:{}", + interest.src_face, + interest.src_interest_id + ); + interest + .src_face + .primitives + .clone() + .send_declare(RoutingContext::new(Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } +} + +#[derive(Clone)] +pub(crate) struct CurrentInterestCleanup { + tables: Arc, + face: Weak, 
+ id: InterestId, +} + +impl CurrentInterestCleanup { + pub(crate) fn spawn_interest_clean_up_task( + face: &Arc, + tables_ref: &Arc, + id: u32, + ) { + let mut cleanup = CurrentInterestCleanup { + tables: tables_ref.clone(), + face: Arc::downgrade(face), + id, + }; + if let Some((_, cancellation_token)) = face.pending_current_interests.get(&id) { + let c_cancellation_token = cancellation_token.clone(); + face.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, async move { + tokio::select! { + _ = tokio::time::sleep(Duration::from_millis(INTEREST_TIMEOUT_MS)) => { cleanup.run().await } + _ = c_cancellation_token.cancelled() => {} + } + }); + } + } +} + +#[async_trait] +impl Timed for CurrentInterestCleanup { + async fn run(&mut self) { + if let Some(mut face) = self.face.upgrade() { + let ctrl_lock = zlock!(self.tables.ctrl_lock); + if let Some(interest) = get_mut_unchecked(&mut face) + .pending_current_interests + .remove(&self.id) + { + drop(ctrl_lock); + tracing::warn!( + "Didn't receive DeclareFinal {}:{} from {}: Timeout({:#?})!", + interest.0.src_face, + self.id, + face, + Duration::from_millis(INTEREST_TIMEOUT_MS), + ); + finalize_pending_interest(interest); + } + } + } +} + +pub(crate) fn declare_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + expr: Option<&WireExpr>, + mode: InterestMode, + options: InterestOptions, +) { + if options.keyexprs() && mode != InterestMode::Current { + register_expr_interest(tables_ref, face, id, expr); + } + + if let Some(expr) = expr { + let rtables = zread!(tables_ref.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare interest {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = if res + .as_ref() + .map(|r| r.context.is_some()) + .unwrap_or(false) + { + 
drop(rtables); + let wtables = zwrite!(tables_ref.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables_ref.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_interest( + &mut wtables, + tables_ref, + face, + id, + Some(&mut res), + mode, + options, + ); + } + None => tracing::error!( + "{} Declare interest {} for unknown scope {}!", + face, + id, + expr.scope + ), + } + } else { + let mut wtables = zwrite!(tables_ref.tables); + hat_code.declare_interest(&mut wtables, tables_ref, face, id, None, mode, options); + } +} + +pub(crate) fn undeclare_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + tracing::debug!("{} Undeclare interest {}", face, id,); + unregister_expr_interest(tables, face, id); + let mut wtables = zwrite!(tables.tables); + hat_code.undeclare_interest(&mut wtables, face, id); +} diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs index 53c32fb5ff..0f42ae2aee 100644 --- a/zenoh/src/net/routing/dispatcher/mod.rs +++ b/zenoh/src/net/routing/dispatcher/mod.rs @@ -18,6 +18,7 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) pub mod face; +pub mod interests; pub mod pubsub; pub mod queries; pub mod resource; diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 510a001053..5d9ab69a92 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -18,7 +18,6 @@ use zenoh_protocol::{ core::{key_expr::keyexpr, WhatAmI, WireExpr}, network::{ declare::{ext, subscriber::ext::SubscriberInfo, SubscriberId}, - interest::{InterestId, InterestMode}, Push, }, zenoh::PushBody, @@ -34,86 +33,6 @@ use super::{ use crate::key_expr::KeyExpr; use crate::net::routing::hat::HatTrait; -pub(crate) fn declare_sub_interest( - hat_code: &(dyn HatTrait + Send + Sync), - tables: &TablesLock, - face: &mut Arc, - id: InterestId, - expr: Option<&WireExpr>, - mode: InterestMode, - aggregate: bool, -) { - if let Some(expr) = expr { - let rtables = zread!(tables.tables); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - tracing::debug!( - "{} Declare sub interest {} ({}{})", - face, - id, - prefix.expr(), - expr.suffix - ); - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = if res - .as_ref() - .map(|r| r.context.is_some()) - .unwrap_or(false) - { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - hat_code.declare_sub_interest( - &mut wtables, - face, - id, - 
Some(&mut res), - mode, - aggregate, - ); - } - None => tracing::error!( - "{} Declare sub interest {} for unknown scope {}!", - face, - id, - expr.scope - ), - } - } else { - let mut wtables = zwrite!(tables.tables); - hat_code.declare_sub_interest(&mut wtables, face, id, None, mode, aggregate); - } -} - -pub(crate) fn undeclare_sub_interest( - hat_code: &(dyn HatTrait + Send + Sync), - tables: &TablesLock, - face: &mut Arc, - id: InterestId, -) { - tracing::debug!("{} Undeclare sub interest {}", face, id,); - let mut wtables = zwrite!(tables.tables); - hat_code.undeclare_sub_interest(&mut wtables, face, id); -} - pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 240ddb3a7d..56e4ce9335 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -25,7 +25,6 @@ use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ declare::{ext, queryable::ext::QueryableInfoType, QueryableId}, - interest::{InterestId, InterestMode}, request::{ ext::{BudgetType, TargetType, TimeoutType}, Request, RequestId, @@ -44,87 +43,6 @@ use super::{ }; use crate::net::routing::{hat::HatTrait, RoutingContext}; -#[allow(clippy::too_many_arguments)] // TODO refactor -pub(crate) fn declare_qabl_interest( - hat_code: &(dyn HatTrait + Send + Sync), - tables: &TablesLock, - face: &mut Arc, - id: InterestId, - expr: Option<&WireExpr>, - mode: InterestMode, - aggregate: bool, -) { - if let Some(expr) = expr { - let rtables = zread!(tables.tables); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - tracing::debug!( - "{} Declare qabl interest {} ({}{})", - face, - id, - prefix.expr(), - expr.suffix - ); - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = if res - .as_ref() - .map(|r| 
r.context.is_some()) - .unwrap_or(false) - { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - hat_code.declare_qabl_interest( - &mut wtables, - face, - id, - Some(&mut res), - mode, - aggregate, - ); - } - None => tracing::error!( - "{} Declare qabl interest {} for unknown scope {}!", - face, - id, - expr.scope - ), - } - } else { - let mut wtables = zwrite!(tables.tables); - hat_code.declare_qabl_interest(&mut wtables, face, id, None, mode, aggregate); - } -} - -pub(crate) fn undeclare_qabl_interest( - hat_code: &(dyn HatTrait + Send + Sync), - tables: &TablesLock, - face: &mut Arc, - id: InterestId, -) { - tracing::debug!("{} Undeclare qabl interest {}", face, id,); - let mut wtables = zwrite!(tables.tables); - hat_code.undeclare_qabl_interest(&mut wtables, face, id); -} - pub(crate) struct Query { src_face: Arc, src_qid: RequestId, diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 9221522c00..73d80d567d 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -30,8 +30,8 @@ use zenoh_sync::get_mut_unchecked; use super::face::FaceState; pub use super::{pubsub::*, queries::*, resource::*}; use crate::net::routing::{ - hat, - hat::HatTrait, + dispatcher::interests::finalize_pending_interests, + hat::{self, HatTrait}, interceptor::{interceptor_factories, InterceptorFactory}, }; @@ -175,7 +175,9 @@ pub fn close_face(tables: &TablesLock, face: 
&Weak) { tracing::debug!("Close {}", face); face.task_controller.terminate_all(Duration::from_secs(10)); finalize_pending_queries(tables, &mut face); - zlock!(tables.ctrl_lock).close_face(tables, &mut face); + let ctrl_lock = zlock!(tables.ctrl_lock); + finalize_pending_interests(tables, &mut face); + ctrl_lock.close_face(tables, &mut face); } None => tracing::error!("Face already closed!"), } diff --git a/zenoh/src/net/routing/hat/client/interests.rs b/zenoh/src/net/routing/hat/client/interests.rs new file mode 100644 index 0000000000..3757677893 --- /dev/null +++ b/zenoh/src/net/routing/hat/client/interests.rs @@ -0,0 +1,173 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_protocol::{ + core::WhatAmI, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, Interest, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{face_hat, face_hat_mut, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::{FaceState, InterestState}, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait}, + RoutingContext, +}; + +pub(super) fn interests_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for mut src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for (res, options) in face_hat_mut!(&mut src_face).remote_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + 
get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options: *options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::CurrentFuture, + options: *options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + } +} + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + ) { + face_hat_mut!(face) + .remote_interests + .insert(id, (res.as_ref().map(|res| (*res).clone()), options)); + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + + if mode.current() { + face.primitives.send_declare(RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } + } + + fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: InterestId) { + if let 
Some(interest) = face_hat_mut!(face).remote_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_interests + .values() + .any(|i| *i == interest) + }) { + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.res == interest.0 && local_interest.options == interest.1 + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + local_interest + .res + .as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), + )); + get_mut_unchecked(dst_face).local_interests.remove(&id); + } + } + } + } + } + } +} diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 29a808bcaf..f41b36e584 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -26,7 +26,7 @@ use std::{ use zenoh_config::WhatAmI; use zenoh_protocol::network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - interest::InterestId, + interest::{InterestId, InterestOptions}, Oam, }; use zenoh_result::ZResult; @@ -34,6 +34,7 @@ use zenoh_sync::get_mut_unchecked; use zenoh_transport::unicast::TransportUnicast; use self::{ + interests::interests_new_face, pubsub::{pubsub_new_face, undeclare_client_subscription}, queries::{queries_new_face, undeclare_client_queryable}, }; @@ -52,6 +53,7 @@ use crate::net::{ runtime::Runtime, }; +mod interests; mod pubsub; mod queries; @@ -100,6 +102,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, face: &mut Face, ) -> ZResult<()> { + interests_new_face(tables, &mut face.state); 
pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); Ok(()) @@ -112,6 +115,7 @@ impl HatBaseTrait for HatCode { face: &mut Face, _transport: &TransportUnicast, ) -> ZResult<()> { + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); Ok(()) @@ -121,7 +125,7 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); - face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).remote_interests.clear(); face_hat_mut!(face).local_subs.clear(); face_hat_mut!(face).local_qabls.clear(); @@ -287,7 +291,7 @@ impl HatContext { struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness - remote_sub_interests: HashMap>>, + remote_interests: HashMap>, InterestOptions)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, @@ -298,7 +302,7 @@ impl HatFace { fn new() -> Self { Self { next_id: AtomicU32::new(0), - remote_sub_interests: HashMap::new(), + remote_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), local_qabls: HashMap::new(), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index e31c5244a7..7ba6005e5a 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -19,13 +19,9 @@ use std::{ use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, - network::{ - declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, - }, - interest::{InterestId, InterestMode, InterestOptions}, - Interest, + network::declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use 
zenoh_sync::get_mut_unchecked; @@ -35,7 +31,7 @@ use crate::{ key_expr::KeyExpr, net::routing::{ dispatcher::{ - face::{FaceState, InterestState}, + face::FaceState, resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, @@ -243,7 +239,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - for mut src_face in tables + for src_face in tables .faces .values() .cloned() @@ -252,134 +248,12 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } - if face.whatami != WhatAmI::Client { - for res in face_hat_mut!(&mut src_face).remote_sub_interests.values() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; - get_mut_unchecked(face).local_interests.insert( - id, - InterestState { - options, - res: res.as_ref().map(|res| (*res).clone()), - finalized: false, - }, - ); - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); - face.primitives.send_interest(RoutingContext::with_expr( - Interest { - id, - mode: InterestMode::CurrentFuture, - options, - wire_expr, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - }, - res.as_ref().map(|res| res.expr()).unwrap_or_default(), - )); - } - } } // recompute routes update_data_routes_from(tables, &mut tables.root_res.clone()); } impl HatPubSubTrait for HatCode { - fn declare_sub_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - _aggregate: bool, - ) { - face_hat_mut!(face) - .remote_sub_interests - .insert(id, res.as_ref().map(|res| 
(*res).clone())); - for dst_face in tables - .faces - .values_mut() - .filter(|f| f.whatami != WhatAmI::Client) - { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; - get_mut_unchecked(dst_face).local_interests.insert( - id, - InterestState { - options, - res: res.as_ref().map(|res| (*res).clone()), - finalized: mode == InterestMode::Future, - }, - ); - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); - dst_face.primitives.send_interest(RoutingContext::with_expr( - Interest { - id, - mode, - options, - wire_expr, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - }, - res.as_ref().map(|res| res.expr()).unwrap_or_default(), - )); - } - } - - fn undeclare_sub_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - if let Some(interest) = face_hat_mut!(face).remote_sub_interests.remove(&id) { - if !tables.faces.values().any(|f| { - f.whatami == WhatAmI::Client - && face_hat!(f) - .remote_sub_interests - .values() - .any(|i| *i == interest) - }) { - for dst_face in tables - .faces - .values_mut() - .filter(|f| f.whatami != WhatAmI::Client) - { - for id in dst_face - .local_interests - .keys() - .cloned() - .collect::>() - { - let local_interest = dst_face.local_interests.get(&id).unwrap(); - if local_interest.options.subscribers() && (local_interest.res == interest) - { - dst_face.primitives.send_interest(RoutingContext::with_expr( - Interest { - id, - mode: InterestMode::Final, - options: InterestOptions::empty(), - wire_expr: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - }, - local_interest - .res - .as_ref() - .map(|res| res.expr()) - .unwrap_or_default(), - )); - get_mut_unchecked(dst_face).local_interests.remove(&id); - } - } - } - } - } - } - fn declare_subscription( &self, tables: &mut Tables, diff --git 
a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 749c03d5f8..bc1fddbb3b 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -27,12 +27,9 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, }, - network::{ - declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, - DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, - }, - interest::{InterestId, InterestMode}, + network::declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -258,27 +255,6 @@ lazy_static::lazy_static! { } impl HatQueriesTrait for HatCode { - fn declare_qabl_interest( - &self, - _tables: &mut Tables, - _face: &mut Arc, - _id: InterestId, - _res: Option<&mut Arc>, - _mode: InterestMode, - _aggregate: bool, - ) { - // ignore - } - - fn undeclare_qabl_interest( - &self, - _tables: &mut Tables, - _face: &mut Arc, - _id: InterestId, - ) { - // ignore - } - fn declare_queryable( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/interests.rs b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs new file mode 100644 index 0000000000..413f06f67b --- /dev/null +++ b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs @@ -0,0 +1,86 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::Arc; + +use zenoh_protocol::network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, HatCode, HatFace, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait}, + RoutingContext, +}; + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + ) { + if options.subscribers() { + declare_sub_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } + if options.queryables() { + declare_qabl_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } + if mode.future() { + face_hat_mut!(face) + .remote_interests + .insert(id, (res.cloned(), options)); + } + if mode.current() { + face.primitives.send_declare(RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } + } + + fn undeclare_interest(&self, _tables: &mut Tables, face: &mut Arc, id: InterestId) { + face_hat_mut!(face).remote_interests.remove(&id); + } +} diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 32e4cb30e9..bc10eaee8a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ 
core::ZenohIdProto, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - interest::InterestId, + interest::{InterestId, InterestOptions}, oam::id::OAM_LINKSTATE, Oam, }, @@ -63,6 +63,7 @@ use crate::net::{ runtime::Runtime, }; +mod interests; mod network; mod pubsub; mod queries; @@ -250,9 +251,8 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); - face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).remote_interests.clear(); face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).remote_qabl_interests.clear(); face_hat_mut!(face).local_qabls.clear(); let face = get_mut_unchecked(face); @@ -481,10 +481,9 @@ impl HatContext { struct HatFace { link_id: usize, next_id: AtomicU32, // @TODO: manage rollover and uniqueness - remote_sub_interests: HashMap>, bool)>, + remote_interests: HashMap>, InterestOptions)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -494,10 +493,9 @@ impl HatFace { Self { link_id: 0, next_id: AtomicU32::new(0), - remote_sub_interests: HashMap::new(), + remote_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), - remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 8860e04f34..a1ff061602 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -25,7 +25,7 @@ use zenoh_protocol::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, - interest::{InterestId, InterestMode}, + interest::{InterestId, InterestMode, InterestOptions}, }, }; use 
zenoh_sync::get_mut_unchecked; @@ -121,14 +121,16 @@ fn propagate_simple_subscription_to( )); } else { let matching_interests = face_hat!(dst_face) - .remote_sub_interests + .remote_interests .values() - .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .filter(|(r, o)| { + o.subscribers() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) + }) .cloned() - .collect::>, bool)>>(); + .collect::>, InterestOptions)>>(); - for (int_res, aggregate) in matching_interests { - let res = if aggregate { + for (int_res, options) in matching_interests { + let res = if options.aggregate() { int_res.as_ref().unwrap_or(res) } else { res @@ -606,85 +608,53 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - aggregate: bool, - ) { - if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = (!mode.future()).then_some(id); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - if let Some(res) = res.as_ref() { - if aggregate { - if hat!(tables).peer_subs.iter().any(|sub| { - sub.context.is_some() - && sub.matches(res) - && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) - }) { - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert((*res).clone(), id); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); - } - } else { - for sub in &hat!(tables).peer_subs { - if sub.context.is_some() - && sub.matches(res) - && 
(remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) - { - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } +pub(super) fn declare_sub_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); } } else { for sub in &hat!(tables).peer_subs { if sub.context.is_some() + && sub.matches(res) && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) { let id = if 
mode.future() { @@ -712,23 +682,40 @@ impl HatPubSubTrait for HatCode { } } } + } else { + for sub in &hat!(tables).peer_subs { + if sub.context.is_some() + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } } - if mode.future() { - face_hat_mut!(face) - .remote_sub_interests - .insert(id, (res.cloned(), aggregate)); - } - } - - fn undeclare_sub_interest( - &self, - _tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - face_hat_mut!(face).remote_sub_interests.remove(&id); } +} +impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index ea893c05b1..16ed7cc7ef 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -174,9 +174,9 @@ fn propagate_simple_queryable( && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client && face_hat!(dst_face) - .remote_qabl_interests + .remote_interests .values() - .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) { let id = current .map(|c| c.0) @@ -680,88 +680,53 @@ lazy_static::lazy_static! 
{ static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); } -impl HatQueriesTrait for HatCode { - fn declare_qabl_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - aggregate: bool, - ) { - if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = (!mode.future()).then_some(id); - if let Some(res) = res.as_ref() { - if aggregate { - if hat!(tables).peer_qabls.iter().any(|qabl| { - qabl.context.is_some() - && qabl.matches(res) - && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) - }) { - let info = local_qabl_info(tables, res, face); - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert((*res).clone(), (id, info)); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } - } else { - for qabl in hat!(tables).peer_qabls.iter() { - if qabl.context.is_some() - && qabl.matches(res) - && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) - { - let info = local_qabl_info(tables, qabl, face); - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - id - } else { - 0 - }; - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - 
)); - } - } +pub(super) fn declare_qabl_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } else { for qabl in hat!(tables).peer_qabls.iter() { if qabl.context.is_some() + && qabl.matches(res) && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) { let info = local_qabl_info(tables, qabl, face); @@ -792,23 +757,43 @@ impl HatQueriesTrait for HatCode { } } } + } else { + for qabl in hat!(tables).peer_qabls.iter() { + if qabl.context.is_some() + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + 
ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } } - if mode.future() { - face_hat_mut!(face) - .remote_qabl_interests - .insert(id, res.cloned()); - } - } - - fn undeclare_qabl_interest( - &self, - _tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - face_hat_mut!(face).remote_qabl_interests.remove(&id); } +} +impl HatQueriesTrait for HatCode { fn declare_queryable( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 6d557f44ff..b30e6e9277 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -28,7 +28,7 @@ use zenoh_protocol::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, SubscriberId, }, - interest::{InterestId, InterestMode}, + interest::{InterestId, InterestMode, InterestOptions}, Oam, }, }; @@ -72,7 +72,10 @@ impl Sources { } } -pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} +pub(crate) trait HatTrait: + HatBaseTrait + HatInterestTrait + HatPubSubTrait + HatQueriesTrait +{ +} pub(crate) trait HatBaseTrait { fn init(&self, tables: &mut Tables, runtime: Runtime); @@ -135,23 +138,22 @@ pub(crate) trait HatBaseTrait { fn close_face(&self, tables: &TablesLock, face: &mut Arc); } -pub(crate) trait HatPubSubTrait { - #[allow(clippy::too_many_arguments)] // TODO refactor - fn declare_sub_interest( +pub(crate) trait HatInterestTrait { + #[allow(clippy::too_many_arguments)] + fn declare_interest( &self, tables: &mut Tables, + tables_ref: &Arc, face: &mut Arc, id: InterestId, res: Option<&mut Arc>, mode: InterestMode, - aggregate: bool, - ); - fn undeclare_sub_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, + options: InterestOptions, ); + fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: 
InterestId); +} + +pub(crate) trait HatPubSubTrait { fn declare_subscription( &self, tables: &mut Tables, @@ -191,22 +193,6 @@ pub(crate) trait HatPubSubTrait { } pub(crate) trait HatQueriesTrait { - #[allow(clippy::too_many_arguments)] // TODO refactor - fn declare_qabl_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - aggregate: bool, - ); - fn undeclare_qabl_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ); fn declare_queryable( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/interests.rs b/zenoh/src/net/routing/hat/p2p_peer/interests.rs new file mode 100644 index 0000000000..0b058fb4b1 --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/interests.rs @@ -0,0 +1,224 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_protocol::{ + core::WhatAmI, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, Interest, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat, face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, HatCode, + HatFace, +}; +use crate::net::routing::{ + dispatcher::{ + face::{FaceState, InterestState}, + interests::{CurrentInterest, CurrentInterestCleanup}, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait}, + RoutingContext, +}; + +pub(super) fn interests_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for mut src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Router { + for (res, options) in face_hat_mut!(&mut src_face).remote_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options: *options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::CurrentFuture, + options: *options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + } + } +} + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + ) { + if options.subscribers() { + 
declare_sub_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } + if options.queryables() { + declare_qabl_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } + face_hat_mut!(face) + .remote_interests + .insert(id, (res.as_ref().map(|res| (*res).clone()), options)); + + let interest = Arc::new(CurrentInterest { + src_face: face.clone(), + src_interest_id: id, + }); + + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami == WhatAmI::Router) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + if mode.current() { + let dst_face_mut = get_mut_unchecked(dst_face); + let cancellation_token = dst_face_mut.task_controller.get_cancellation_token(); + dst_face_mut + .pending_current_interests + .insert(id, (interest.clone(), cancellation_token)); + CurrentInterestCleanup::spawn_interest_clean_up_task(dst_face, tables_ref, id); + } + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + + if mode.current() { + if let Some(interest) = Arc::into_inner(interest) { + tracing::debug!( + "Propagate DeclareFinal {}:{}", + interest.src_face, + interest.src_interest_id + ); + interest + .src_face + .primitives + .clone() + .send_declare(RoutingContext::new(Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: 
DeclareBody::DeclareFinal(DeclareFinal), + })); + } + } + } + + fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: InterestId) { + if let Some(interest) = face_hat_mut!(face).remote_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_interests + .values() + .any(|i| *i == interest) + }) { + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami == WhatAmI::Router) + { + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.res == interest.0 && local_interest.options == interest.1 + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + local_interest + .res + .as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), + )); + get_mut_unchecked(dst_face).local_interests.remove(&id); + } + } + } + } + } + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 30e51e05f1..38ee54e0f6 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -43,6 +43,7 @@ use zenoh_transport::unicast::TransportUnicast; use self::{ gossip::Network, + interests::interests_new_face, pubsub::{pubsub_new_face, undeclare_client_subscription}, queries::{queries_new_face, undeclare_client_queryable}, }; @@ -65,6 +66,7 @@ use crate::net::{ }; mod gossip; +mod interests; mod pubsub; mod queries; @@ -145,6 +147,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, face: &mut Face, ) -> ZResult<()> { + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); Ok(()) @@ -173,6 +176,7 @@ impl 
HatBaseTrait for HatCode { ); } + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); @@ -194,9 +198,8 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); - face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).remote_interests.clear(); face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).remote_qabl_interests.clear(); face_hat_mut!(face).local_qabls.clear(); let face = get_mut_unchecked(face); @@ -388,10 +391,9 @@ impl HatContext { struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness - remote_sub_interests: HashMap>, bool)>, + remote_interests: HashMap>, InterestOptions)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -400,10 +402,9 @@ impl HatFace { fn new() -> Self { Self { next_id: AtomicU32::new(0), - remote_sub_interests: HashMap::new(), + remote_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), - remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index b4be235a29..9cc2f05bf6 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -25,7 +25,6 @@ use zenoh_protocol::{ DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, interest::{InterestId, InterestMode, InterestOptions}, - Interest, }, }; use zenoh_sync::get_mut_unchecked; @@ -35,11 +34,11 @@ use crate::{ key_expr::KeyExpr, net::routing::{ dispatcher::{ - face::{FaceState, InterestState}, + face::FaceState, resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, - hat::{HatPubSubTrait, Sources}, + 
hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, router::{update_data_routes_from, RoutesIndexes}, RoutingContext, PREFIX_LIVELINESS, }, @@ -78,14 +77,16 @@ fn propagate_simple_subscription_to( )); } else { let matching_interests = face_hat!(dst_face) - .remote_sub_interests + .remote_interests .values() - .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .filter(|(r, o)| { + o.subscribers() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) + }) .cloned() - .collect::>, bool)>>(); + .collect::>, InterestOptions)>>(); - for (int_res, aggregate) in matching_interests { - let res = if aggregate { + for (int_res, options) in matching_interests { + let res = if options.aggregate() { int_res.as_ref().unwrap_or(res) } else { res @@ -342,7 +343,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - for mut src_face in tables + for src_face in tables .faces .values() .cloned() @@ -357,33 +358,6 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { &mut src_face.clone(), ); } - if face.whatami == WhatAmI::Router { - for (res, _) in face_hat_mut!(&mut src_face).remote_sub_interests.values() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; - get_mut_unchecked(face).local_interests.insert( - id, - InterestState { - options, - res: res.as_ref().map(|res| (*res).clone()), - finalized: false, - }, - ); - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); - face.primitives.send_interest(RoutingContext::with_expr( - Interest { - id, - mode: InterestMode::CurrentFuture, - options, - wire_expr, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - }, - res.as_ref().map(|res| res.expr()).unwrap_or_default(), - 
)); - } - } } } // recompute routes @@ -392,103 +366,127 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { update_data_routes_from(tables, &mut tables.root_res.clone()); } -impl HatPubSubTrait for HatCode { - fn declare_sub_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - aggregate: bool, - ) { - face_hat_mut!(face) - .remote_sub_interests - .insert(id, (res.as_ref().map(|res| (*res).clone()), aggregate)); - for dst_face in tables - .faces - .values_mut() - .filter(|f| f.whatami == WhatAmI::Router) - { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; - get_mut_unchecked(dst_face).local_interests.insert( - id, - InterestState { - options, - res: res.as_ref().map(|res| (*res).clone()), - finalized: mode == InterestMode::Future, - }, - ); - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); - dst_face.primitives.send_interest(RoutingContext::with_expr( - Interest { - id, - mode, - options, - wire_expr, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - }, - res.as_ref().map(|res| res.expr()).unwrap_or_default(), - )); - } - } - - fn undeclare_sub_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - if let Some(interest) = face_hat_mut!(face).remote_sub_interests.remove(&id) { - if !tables.faces.values().any(|f| { - f.whatami == WhatAmI::Client - && face_hat!(f) - .remote_sub_interests - .values() - .any(|i| *i == interest) - }) { - for dst_face in tables +pub(super) fn declare_sub_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + let sub_info = SubscriberInfo { + reliability: 
Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_subs + .values() + .any(|sub| sub.context.is_some() && sub.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for src_face in tables .faces - .values_mut() - .filter(|f| f.whatami == WhatAmI::Router) + .values() + .cloned() + .collect::>>() { - for id in dst_face - .local_interests - .keys() - .cloned() - .collect::>() - { - let local_interest = dst_face.local_interests.get(&id).unwrap(); - if local_interest.options.subscribers() - && (local_interest.res == interest.0) - { - dst_face.primitives.send_interest(RoutingContext::with_expr( - Interest { - id, - mode: InterestMode::Final, - options: InterestOptions::empty(), - wire_expr: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - }, - local_interest - .res - .as_ref() - .map(|res| res.expr()) - .unwrap_or_default(), - )); - get_mut_unchecked(dst_face).local_interests.remove(&id); + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + if sub.context.is_some() && sub.matches(res) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else 
{ + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } } } } } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } + } } } +} +impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index b909190184..cafe65b8c7 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -93,9 +93,9 @@ fn propagate_simple_queryable_to( && (current.is_none() || current.unwrap().1 != info) && (dst_face.whatami != WhatAmI::Client || face_hat!(dst_face) - .remote_qabl_interests + .remote_interests .values() - .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true))) + .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true))) && (src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) @@ -342,93 +342,50 @@ lazy_static::lazy_static! 
{ static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); } -impl HatQueriesTrait for HatCode { - fn declare_qabl_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - aggregate: bool, - ) { - if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = (!mode.future()).then_some(id); - if let Some(res) = res.as_ref() { - if aggregate { - if tables.faces.values().any(|src_face| { - src_face.id != face.id - && face_hat!(src_face) - .remote_qabls - .values() - .any(|qabl| qabl.context.is_some() && qabl.matches(res)) - }) { - let info = local_qabl_info(tables, res, face); - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert((*res).clone(), (id, info)); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } - } else { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if src_face.id != face.id { - for qabl in face_hat!(src_face).remote_qabls.values() { - if qabl.context.is_some() && qabl.matches(res) { - let info = local_qabl_info(tables, qabl, face); - let id = if mode.future() { - let id = - face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - id - } else { - 0 - }; - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: 
key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } +pub(super) fn declare_qabl_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_qabls + .values() + .any(|qabl| qabl.context.is_some() && qabl.matches(res)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } else { for src_face in tables @@ -439,7 +396,7 @@ impl HatQueriesTrait for HatCode { { if src_face.id != face.id { for qabl in face_hat!(src_face).remote_qabls.values() { - if qabl.context.is_some() { + if qabl.context.is_some() && qabl.matches(res) { let info = local_qabl_info(tables, qabl, face); let id = if mode.future() { let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); @@ -470,23 +427,50 @@ impl HatQueriesTrait for HatCode { } } } - } - if mode.future() { - face_hat_mut!(face) - .remote_qabl_interests - .insert(id, res.cloned()); + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() { + let info = local_qabl_info(tables, 
qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } } } +} - fn undeclare_qabl_interest( - &self, - _tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - face_hat_mut!(face).remote_qabl_interests.remove(&id); - } - +impl HatQueriesTrait for HatCode { fn declare_queryable( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/router/interests.rs b/zenoh/src/net/routing/hat/router/interests.rs new file mode 100644 index 0000000000..a12201d7ad --- /dev/null +++ b/zenoh/src/net/routing/hat/router/interests.rs @@ -0,0 +1,96 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::Arc; + +use zenoh_protocol::{ + core::WhatAmI, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, HatCode, HatFace, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait}, + RoutingContext, +}; + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + mut options: InterestOptions, + ) { + if options.aggregate() && face.whatami == WhatAmI::Peer { + tracing::warn!( + "Received Interest with aggregate=true from peer {}. 
Not supported!", + face.zid + ); + options -= InterestOptions::AGGREGATE; + } + if options.subscribers() { + declare_sub_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } + if options.queryables() { + declare_qabl_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } + if mode.future() { + face_hat_mut!(face) + .remote_interests + .insert(id, (res.cloned(), options)); + } + if mode.current() { + face.primitives.send_declare(RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } + } + + fn undeclare_interest(&self, _tables: &mut Tables, face: &mut Arc, id: InterestId) { + face_hat_mut!(face).remote_interests.remove(&id); + } +} diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 46dfe6f058..c3f51eadba 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -31,7 +31,7 @@ use zenoh_protocol::{ core::ZenohIdProto, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - interest::InterestId, + interest::{InterestId, InterestOptions}, oam::id::OAM_LINKSTATE, Oam, }, @@ -64,6 +64,7 @@ use crate::net::{ runtime::Runtime, }; +mod interests; mod network; mod pubsub; mod queries; @@ -419,9 +420,8 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); - face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).remote_interests.clear(); face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).remote_qabl_interests.clear(); face_hat_mut!(face).local_qabls.clear(); let face = get_mut_unchecked(face); @@ -782,10 +782,9 @@ impl HatContext { struct HatFace { link_id: usize, next_id: AtomicU32, // 
@TODO: manage rollover and uniqueness - remote_sub_interests: HashMap>, bool)>, + remote_interests: HashMap>, InterestOptions)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -795,10 +794,9 @@ impl HatFace { Self { link_id: 0, next_id: AtomicU32::new(0), - remote_sub_interests: HashMap::new(), + remote_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), - remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index dfb578ecf7..e13aade332 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -25,7 +25,7 @@ use zenoh_protocol::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, - interest::{InterestId, InterestMode}, + interest::{InterestId, InterestMode, InterestOptions}, }, }; use zenoh_sync::get_mut_unchecked; @@ -111,14 +111,14 @@ fn propagate_simple_subscription_to( } { let matching_interests = face_hat!(dst_face) - .remote_sub_interests + .remote_interests .values() - .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .filter(|(r, o)| o.subscribers() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) .cloned() - .collect::>, bool)>>(); + .collect::>, InterestOptions)>>(); - for (int_res, aggregate) in matching_interests { - let res = if aggregate { + for (int_res, options) in matching_interests { + let res = if options.aggregate() { int_res.as_ref().unwrap_or(res) } else { res @@ -860,111 +860,65 @@ pub(super) fn pubsub_linkstate_change( } } -impl HatPubSubTrait for HatCode { - fn declare_sub_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: 
Option<&mut Arc>, - mode: InterestMode, - mut aggregate: bool, - ) { - if aggregate && face.whatami == WhatAmI::Peer { - tracing::warn!( - "Received Interest with aggregate=true from peer {}. Not supported!", - face.zid - ); - aggregate = true; - } - if mode.current() { - let interest_id = (!mode.future()).then_some(id); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - if let Some(res) = res.as_ref() { - if aggregate { - if hat!(tables).router_subs.iter().any(|sub| { - sub.context.is_some() - && sub.matches(res) - && (remote_client_subs(sub, face) - || remote_peer_subs(tables, sub) - || remote_router_subs(tables, sub)) - }) { - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert((*res).clone(), id); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); - } - } else { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && sub.matches(res) - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.face.id != face.id - && s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - id - } else { - 0 - }; - let wire_expr = 
Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } +pub(crate) fn declare_sub_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() { + let interest_id = (!mode.future()).then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); } } else { for sub in &hat!(tables).router_subs { if sub.context.is_some() + && sub.matches(res) && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami != WhatAmI::Peer - || face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(s.face.zid, face.zid)) + s.face.id != face.id + && 
s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) })) { let id = if mode.future() { @@ -992,23 +946,47 @@ impl HatPubSubTrait for HatCode { } } } + } else { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(s.face.zid, face.zid)) + })) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } } - if mode.future() { - face_hat_mut!(face) - .remote_sub_interests - .insert(id, (res.cloned(), aggregate)); - } - } - - fn undeclare_sub_interest( - &self, - _tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - face_hat_mut!(face).remote_sub_interests.remove(&id); } +} +impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 217d74955f..9df58a32a5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -242,9 +242,9 @@ fn propagate_simple_queryable( if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && 
(current.is_none() || current.unwrap().1 != info) && face_hat!(dst_face) - .remote_qabl_interests + .remote_interests .values() - .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -1037,116 +1037,71 @@ lazy_static::lazy_static! { static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); } -impl HatQueriesTrait for HatCode { - fn declare_qabl_interest( - &self, - tables: &mut Tables, - face: &mut Arc, - id: InterestId, - res: Option<&mut Arc>, - mode: InterestMode, - mut aggregate: bool, - ) { - if aggregate && face.whatami == WhatAmI::Peer { - tracing::warn!( - "Received Interest with aggregate=true from peer {}. Not supported!", - face.zid - ); - aggregate = true; - } - if mode.current() && face.whatami == WhatAmI::Client { - let interest_id = (!mode.future()).then_some(id); - if let Some(res) = res.as_ref() { - if aggregate { - if hat!(tables).router_qabls.iter().any(|qabl| { - qabl.context.is_some() - && qabl.matches(res) - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.face.id != face.id - && s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - }) { - let info = local_qabl_info(tables, res, face); - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert((*res).clone(), (id, info)); - id - } else { - 0 - }; - let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: 
DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } - } else { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && qabl.matches(res) - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami != WhatAmI::Peer - || face.whatami != WhatAmI::Peer - || hat!(tables) - .failover_brokering(s.face.zid, face.zid)) - })) - { - let info = local_qabl_info(tables, qabl, face); - let id = if mode.future() { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - id - } else { - 0 - }; - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } +pub(crate) fn declare_qabl_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.face.id != face.id + && s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + }) { + let 
info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } else { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() - && (remote_client_qabls(qabl, face) - || remote_peer_qabls(tables, qabl) - || remote_router_qabls(tables, qabl)) + && qabl.matches(res) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(s.face.zid, face.zid)) + })) { let info = local_qabl_info(tables, qabl, face); let id = if mode.future() { @@ -1176,23 +1131,45 @@ impl HatQueriesTrait for HatCode { } } } + } else { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: 
DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } } - if mode.future() { - face_hat_mut!(face) - .remote_qabl_interests - .insert(id, res.cloned()); - } - } - - fn undeclare_qabl_interest( - &self, - _tables: &mut Tables, - face: &mut Arc, - id: InterestId, - ) { - face_hat_mut!(face).remote_qabl_interests.remove(&id); } +} +impl HatQueriesTrait for HatCode { fn declare_queryable( &self, tables: &mut Tables, From 0de438525054f4a5c832ac3e58621f13ba014f9b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 17 Jun 2024 11:59:57 +0200 Subject: [PATCH 467/598] exposed zread and zwrite needed by plugins, zenohId::into_keyexpr made internal --- commons/zenoh-config/src/wrappers.rs | 2 +- zenoh/src/lib.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs index 73b66824b8..fd6d2ef50b 100644 --- a/commons/zenoh-config/src/wrappers.rs +++ b/commons/zenoh-config/src/wrappers.rs @@ -31,7 +31,7 @@ pub struct ZenohId(ZenohIdProto); impl ZenohId { /// Used by plugins for crating adminspace path - #[zenoh_macros::unstable] + #[zenoh_macros::internal] pub fn into_keyexpr(self) -> OwnedKeyExpr { self.into() } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index f62867c9b2..956cbe4896 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -364,7 +364,9 @@ compile_error!( #[zenoh_macros::internal] pub mod internal { - pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout, ResolveFuture}; + pub use zenoh_core::{ + zasync_executor_init, zerror, zlock, zread, ztimeout, zwrite, ResolveFuture, + }; pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_task::{TaskController, TerminatableTask}; From a4ac6f4461962035db6107b1cb617621c72c71ba Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 17 Jun 2024 13:13:35 +0200 Subject: [PATCH 468/598] zasynclock added --- 
zenoh/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 956cbe4896..c5cd43c506 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -365,7 +365,7 @@ compile_error!( #[zenoh_macros::internal] pub mod internal { pub use zenoh_core::{ - zasync_executor_init, zerror, zlock, zread, ztimeout, zwrite, ResolveFuture, + zasync_executor_init, zasynclock, zerror, zlock, zread, ztimeout, zwrite, ResolveFuture, }; pub use zenoh_result::bail; pub use zenoh_sync::Condition; From 23e36b3fb61b50d9ee90883479858fe8093579ee Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 17 Jun 2024 14:47:47 +0200 Subject: [PATCH 469/598] Re-export more traits to zenoh::internal::buffers --- zenoh/src/lib.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c5cd43c506..97e33a0bc0 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -376,8 +376,12 @@ pub mod internal { /// reading and writing data. pub mod buffers { pub use zenoh_buffers::{ - buffer::SplitBuffer, - reader::{HasReader, Reader}, + buffer::{Buffer, SplitBuffer}, + reader::{ + AdvanceableReader, BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, + SiphonableReader, + }, + writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZBuf, ZBufReader, ZSlice, ZSliceBuffer, }; } From ff066ccf092b5cf741761eb1e26ca6b84fa37a38 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 17 Jun 2024 15:05:15 +0200 Subject: [PATCH 470/598] Enable internal feature for zenoh-config --- commons/zenoh-config/Cargo.toml | 2 +- zenoh/Cargo.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index 6265eb6bc9..49c9d722f1 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -24,7 +24,7 @@ categories = { workspace = true } description = "Internal crate for zenoh." 
[features] -unstable = [] +internal = [] [dependencies] tracing = { workspace = true } diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index c7da78b8a3..559220e734 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -43,7 +43,7 @@ default = [ "transport_unixsock-stream", "transport_ws", ] -internal = ["zenoh-keyexpr/internal"] +internal = ["zenoh-keyexpr/internal", "zenoh-config/internal"] plugins = [] shared-memory = [ "zenoh-shm", @@ -63,7 +63,7 @@ transport_udp = ["zenoh-transport/transport_udp"] transport_unixsock-stream = ["zenoh-transport/transport_unixsock-stream"] transport_ws = ["zenoh-transport/transport_ws"] transport_vsock = ["zenoh-transport/transport_vsock"] -unstable = ["zenoh-keyexpr/unstable", "zenoh-config/unstable"] +unstable = ["zenoh-keyexpr/unstable"] [dependencies] tokio = { workspace = true, features = ["rt", "macros", "time"] } From cab6c451be5a21c787d132988c8da62c04beb4da Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Mon, 17 Jun 2024 16:47:47 +0200 Subject: [PATCH 471/598] Expose TimedHandle via internal --- zenoh/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 97e33a0bc0..e07b8b6806 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -370,7 +370,9 @@ pub mod internal { pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_task::{TaskController, TerminatableTask}; - pub use zenoh_util::{zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; + pub use zenoh_util::{ + zenoh_home, LibLoader, Timed, TimedEvent, TimedHandle, Timer, ZENOH_HOME_ENV_VAR, + }; /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. 
From 338af96150928003de8477b5973cc99da03de768 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 17 Jun 2024 18:14:22 +0200 Subject: [PATCH 472/598] Implement serialize/deserialze for Value (#1158) * Implement serialize/deserialze for Value * Fix pre-commit --- zenoh/src/api/bytes.rs | 175 ++++++++++++++++++++++++++++++++++++++- zenoh/src/api/session.rs | 2 +- zenoh/src/api/value.rs | 12 --- 3 files changed, 175 insertions(+), 14 deletions(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 1496492379..ef94d83116 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -26,7 +26,10 @@ use zenoh_buffers::{ ZBuf, ZBufReader, ZBufWriter, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::{core::Parameters, zenoh::ext::AttachmentType}; +use zenoh_protocol::{ + core::{Encoding as EncodingProto, Parameters}, + zenoh::ext::AttachmentType, +}; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::{ @@ -37,6 +40,8 @@ use zenoh_shm::{ ShmBufInner, }; +use super::{encoding::Encoding, value::Value}; + /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { type Output; @@ -1220,6 +1225,174 @@ impl<'s> TryFrom<&'s mut ZBytes> for Parameters<'s> { } } +// Encoding +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Encoding) -> Self::Output { + let e: EncodingProto = s.into(); + let codec = Zenoh080::new(); + let mut buffer = ZBuf::empty(); + let mut writer = buffer.writer(); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. 
+ unsafe { + codec.write(&mut writer, &e).unwrap_unchecked(); + } + ZBytes::from(buffer) + } +} + +impl From for ZBytes { + fn from(t: Encoding) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Encoding> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Encoding) -> Self::Output { + ZSerde.serialize(s.clone()) + } +} + +impl From<&Encoding> for ZBytes { + fn from(t: &Encoding) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Encoding> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Encoding) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Encoding> for ZBytes { + fn from(t: &mut Encoding) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Encoding> for ZSerde { + type Input = &'a ZBytes; + type Error = zenoh_buffers::reader::DidntRead; + + fn deserialize(self, v: Self::Input) -> Result { + let codec = Zenoh080::new(); + let mut reader = v.0.reader(); + let e: EncodingProto = codec.read(&mut reader)?; + Ok(e.into()) + } +} + +impl TryFrom for Encoding { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for Encoding { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for Encoding { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Value +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Value) -> Self::Output { + ZSerde.serialize((s.payload(), s.encoding())) + } +} + +impl From for ZBytes { + fn from(t: Value) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Value> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Value) -> Self::Output { + ZSerde.serialize(s.clone()) + } +} + +impl From<&Value> for ZBytes { + fn from(t: &Value) -> 
Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Value> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Value) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Value> for ZBytes { + fn from(t: &mut Value) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Value> for ZSerde { + type Input = &'a ZBytes; + type Error = ZError; + + fn deserialize(self, v: Self::Input) -> Result { + let (payload, encoding) = v + .deserialize::<(ZBytes, Encoding)>() + .map_err(|e| zerror!("{:?}", e))?; + Ok(Value::new(payload, encoding)) + } +} + +impl TryFrom for Value { + type Error = ZError; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for Value { + type Error = ZError; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for Value { + type Error = ZError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index c1d7290e72..1c4ae2086f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1701,7 +1701,7 @@ impl Session { } } (query.callback)(Reply { - result: Err(Value::from("Timeout").into()), + result: Err(Value::new("Timeout", Encoding::ZENOH_STRING).into()), replier_id: Some(zid.into()), }); } diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 4d482da0b5..006767e427 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -59,18 +59,6 @@ impl Value { } } -impl From for Value -where - T: Into, -{ - fn from(t: T) -> Self { - Value { - payload: t.into(), - encoding: Encoding::default(), - } - } -} - impl From> for Value where T: Into, From a5b4a6582b6d55ae4977cb6aa235a70ed10f348e Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 18 Jun 2024 09:50:54 +0200 Subject: [PATCH 473/598] Refactor Liveliness 
implementation (#865) * Router implements interests protocol for clients * Send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients for pico * Fix WireExprExt M flag encoding/decoding * Fix decl_key * Clients send all samples and queries to routers and peers * Avoid self declaration loop on interest * Fix query/replies copy/paste bugs * Peers implement interests protocol for clients * Don't send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients * Add client writer-side filtering (#863) * Add client writer-side filtering * Reimplement liveliness with interests * Fix writer-side filtering before receiving FinalInterest * Fix pubsub interest based routing after router failover * refactor: Add boilerplace for new liveliness router hat * refactor: Handle incoming liveliness token declaration/undeclaration * refactor: Take `TokenId` id in `HatLivelinessTrait` methods * refactor: Implement router `HatLivelinessTrait` * chore: Add copyright headers * refactor: Implement client `HatLivelinessTrait` * refactor: Implement liveliness dispatching logic * refactor: Use `DeclareToken`/`UndeclareToken` in liveliness declaration/undeclaration * refactor: Implement Token Interest protocol for client & router * fix: Use correct token state in Liveliness query replies * chore: Temporarily Allow dead_code and unused_variables in p2p_peer and linkstate_peer hat * style: Rename `Primitives` to `IngressPrimitives` and `EPrimitives` to `EgressPrimitives` Ingress means "entering the router" and Egress means "leaving the router". This change could of course be reverted (through r-a) right before merging the refactor/liveliness-declaration branch. It would've been even better to move Session's IngressPrimitives impl to an EgressPrimitives, but that's not possible since a Session can recieve Query replies from itself. Ideally, Session in/egress should be Router e/ingress but the `EPrimitives` trait breaks this symmetry. 
Messages leaving the router are not the same as messages leaving the Session. This could be solved by making the `EgressPrimitives` trait generic over the message context, but the necessary refactoring would be a hude undertaking, and I don't have enough time nor familiarity with the codebase for it. * ci: Fix `clippy::suspicious_open_options` lint * chore: Fix naming issue after rebasing against `interests` * style: Rename liveliness to token * refactor: Add p2p_peer token implementation * fix: Set token id to 0 in multicast token declaration * fix: Mark unused arguments in p2p_peer `HatTokenTrait` impl * refactor: Add linkstate_peer token implementation * fix: Declare `Interest::TOKEN` in liveliness subscriber declaration * wip: Implement liveliness subscriber without liveliness prefix * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Implement proper Declare Request/Response id correlation * Add new Interest network message * Update doc * Update codec * Fix stable build * Fix test_acl * Fix writer side filtering * fix: Cleanup token ressources * fix: Remove multicast token propagation * fix: Logic error in `Session::undeclare_subscriber_inner` * fix: Change `log::debug` to `log::trace` in `IngressPrimitives` * revert: Restore names of `Primitives` and `EPrimitives` methods * Add separate functions to compute matching status * Fix unstable imports * Remove useless checks * fix: Correctly set `interest_id` field everywhere * feat: Implement liveliness queries in `Session` * fix: Discard liveliness query on `DeclareFinal` with known interest id * feat: Propagate client liveliness queries, send local replies * feat: Handle liveliness query (final) replies * fix: Apply Clippy lints from Rust 
1.77 * fix: `RwLock` deadlock in `Session::undeclare_subscriber_inner` * fix: Remove `zenoh::net::routing::PREFIX_LIVELINESS` The key assumption here is that no key expression can start with '@/liveliness' anymore. * fix: Set `ext_qos` to `QoSType::DEFAULT` when for liveliness queries * fix: Don't immediatly reply with `DeclareFinal` to token interest * fix: Send `DeclareToken` w/ interest id for `InterestMode::Current` * fix: Send `DeclareFinal` in response to `Current` token interest * fix: Set `ext_qos: QoSType::DEFAULT` in routing interest declaration * fix: Share ownership of `TokenQuery` b/w destination faces * fix: Remove unused code * fix: Rustfmt errors with CI config * test: Liveliness clique/brokered and subscriber/query scenarios * fix: Don't declare tokens with interest ids for `CurrentFuture` * fix: Incorrect wire expr for liveliness undeclaration callbacks * fix: Support liveliness queries to linkstate/p2p peers * test: Querying & fetching liveliness subscribers * fix: Incorrect keyexpr in liveliness query replies * fix: Remove `LivelinessQueryState::key_expr` * Fix locking * Only store tokens in response of a Future interest * Rename interest_id_counter * Remove dataroute related code from token dispatcher * Fix client token interest local replies * Remove no more needed compute_local_replies * Move TokenQuery to token module * Fix details * Use proper id in DeclareToken for p2p_peer * Remove comments * Add needed token_new_face functions * Don't register declares sent as response to a current interest * Code reorg * Add missing token_remove_node, token_tree_change and token_linkstate_change * Fix querying subscriber liveliness test * Send one shot Undeclares * Fix clippy warnings * Add misssing token related code in close_face * Properly propagate one shot undeclare token * Fix querying subscriber liveliness test * Fix invalid interest aggregate option handling * Fix interest propagation when pub client connecting to peer * Code reorg * Peers wait 
for DeclareFinal from routers before propagating to clients * Fix InterestFinal propagation * Only send back DeclareFinal if interest is current * Terminate liveliness interest for subsystems of peers * Fix liveliness test includes * Add liveliness local tests * Address review comments * Fix compilation * Fix stable build * Apply 2024 copyright to new files (interests new files as well) * Avoid clones * Remove unwraps * Fix Value usage after merge * Remove useless checks * Change &Option tp Option<&T> * Revert wrongly commited change in z_liveliness example --------- Co-authored-by: OlivierHecart Co-authored-by: OlivierHecart Co-authored-by: Luca Cominardi --- zenoh-ext/Cargo.toml | 7 +- zenoh-ext/tests/liveliness.rs | 354 ++++++ zenoh/src/api/admin.rs | 20 +- zenoh/src/api/liveliness.rs | 43 +- zenoh/src/api/publisher.rs | 8 +- zenoh/src/api/query.rs | 5 + zenoh/src/api/session.rs | 539 ++++++--- zenoh/src/api/subscriber.rs | 16 +- zenoh/src/net/routing/dispatcher/face.rs | 19 +- zenoh/src/net/routing/dispatcher/interests.rs | 2 +- zenoh/src/net/routing/dispatcher/mod.rs | 1 + zenoh/src/net/routing/dispatcher/queries.rs | 52 +- zenoh/src/net/routing/dispatcher/resource.rs | 2 + zenoh/src/net/routing/dispatcher/token.rs | 146 +++ zenoh/src/net/routing/hat/client/interests.rs | 66 +- zenoh/src/net/routing/hat/client/mod.rs | 47 +- zenoh/src/net/routing/hat/client/pubsub.rs | 35 +- zenoh/src/net/routing/hat/client/queries.rs | 57 +- zenoh/src/net/routing/hat/client/token.rs | 383 ++++++ .../routing/hat/linkstate_peer/interests.rs | 15 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 52 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 34 +- .../net/routing/hat/linkstate_peer/queries.rs | 81 +- .../net/routing/hat/linkstate_peer/token.rs | 717 +++++++++++ zenoh/src/net/routing/hat/mod.rs | 36 +- .../src/net/routing/hat/p2p_peer/interests.rs | 16 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 47 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 85 +- 
zenoh/src/net/routing/hat/p2p_peer/queries.rs | 48 +- zenoh/src/net/routing/hat/p2p_peer/token.rs | 482 ++++++++ zenoh/src/net/routing/hat/router/interests.rs | 15 +- zenoh/src/net/routing/hat/router/mod.rs | 63 +- zenoh/src/net/routing/hat/router/pubsub.rs | 111 +- zenoh/src/net/routing/hat/router/queries.rs | 84 +- zenoh/src/net/routing/hat/router/token.rs | 1051 +++++++++++++++++ zenoh/src/net/routing/mod.rs | 2 - zenoh/src/net/routing/router.rs | 2 +- zenoh/tests/liveliness.rs | 332 +++++- 38 files changed, 4359 insertions(+), 716 deletions(-) create mode 100644 zenoh-ext/tests/liveliness.rs create mode 100644 zenoh/src/net/routing/dispatcher/token.rs create mode 100644 zenoh/src/net/routing/hat/client/token.rs create mode 100644 zenoh/src/net/routing/hat/linkstate_peer/token.rs create mode 100644 zenoh/src/net/routing/hat/p2p_peer/token.rs create mode 100644 zenoh/src/net/routing/hat/router/token.rs diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 47204a2d66..4f2613cb70 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -29,9 +29,7 @@ maintenance = { status = "actively-developed" } [features] unstable = [] default = [] -shared-memory = [ - "zenoh/shared-memory", -] +shared-memory = ["zenoh/shared-memory"] [dependencies] tokio = { workspace = true, features = [ @@ -53,5 +51,8 @@ serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable", "internal"], default-features = false } zenoh-macros = { workspace = true } +[dev-dependencies] +zenoh = { workspace = true, features = ["unstable"], default-features = true } + [package.metadata.docs.rs] features = ["unstable"] diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs new file mode 100644 index 0000000000..23e901d458 --- /dev/null +++ b/zenoh-ext/tests/liveliness.rs @@ -0,0 +1,354 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse 
Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh::{ + config::{self, EndPoint, WhatAmI}, + sample::SampleKind, +}; + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_querying_subscriber_clique() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const PEER1_ENDPOINT: &str = "udp/localhost:47447"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let peer1 = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; + + let peer2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (2) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(peer1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .querying()) + .unwrap(); + 
tokio::time::sleep(SLEEP).await; + + let _token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + drop(token1); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_querying_subscriber_brokered() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const ROUTER_ENDPOINT: &str = "tcp/localhost:47448"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let _router = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Router)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Router ZID: {}", s.zid()); + s + }; + + let client1 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + 
let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (1) ZID: {}", s.zid()); + s + }; + + let client2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (2) ZID: {}", s.zid()); + s + }; + + let client3 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (3) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(client2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(client1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .querying()) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let _token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + drop(token1); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_fetching_subscriber_clique() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + 
+ const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const PEER1_ENDPOINT: &str = "udp/localhost:47449"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let peer1 = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; + + let peer2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (2) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(peer1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .fetching(|cb| peer1 + .liveliness() + .get(LIVELINESS_KEYEXPR_ALL) + .callback(cb) + .wait())) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let _token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + drop(token1); + 
tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_fetching_subscriber_brokered() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const ROUTER_ENDPOINT: &str = "tcp/localhost:47450"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let _router = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Router)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Router ZID: {}", s.zid()); + s + }; + + let client1 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (1) ZID: {}", s.zid()); + s + }; + + let client2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (2) ZID: {}", s.zid()); + s + }; + + let client3 = { + let mut c = config::default(); + c.connect 
+ .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (3) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(client2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(client1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .fetching(|cb| client1 + .liveliness() + .get(LIVELINESS_KEYEXPR_ALL) + .callback(cb) + .wait())) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let _token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + drop(token1); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); +} diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 4c4d2a869e..9f2e073f75 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -31,6 +31,7 @@ use super::{ queryable::Query, sample::{DataInfo, Locality, SampleKind}, session::Session, + subscriber::SubscriberKind, }; macro_rules! 
ke_for_sure { @@ -162,11 +163,12 @@ impl TransportMulticastEventHandler for Handler { encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &expr, Some(info), serde_json::to_vec(&peer).unwrap().into(), + SubscriberKind::Subscriber, None, ); Ok(Arc::new(PeerHandler { @@ -207,7 +209,7 @@ impl TransportPeerEventHandler for PeerHandler { encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &self .expr @@ -215,6 +217,7 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), serde_json::to_vec(&link).unwrap().into(), + SubscriberKind::Subscriber, None, ); } @@ -226,7 +229,7 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &self .expr @@ -234,6 +237,7 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), vec![0u8; 0].into(), + SubscriberKind::Subscriber, None, ); } @@ -245,8 +249,14 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session - .handle_data(true, &self.expr, Some(info), vec![0u8; 0].into(), None); + self.session.execute_subscriber_callbacks( + true, + &self.expr, + Some(info), + vec![0u8; 0].into(), + SubscriberKind::Subscriber, + None, + ); } fn as_any(&self) -> &dyn std::any::Any { diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 91f5d4b227..6e8cc30483 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -21,30 +21,17 @@ use std::{ use zenoh_config::unwrap_or_default; use zenoh_core::{Resolvable, Resolve, Result as ZResult, Wait}; -use zenoh_keyexpr::keyexpr; -use zenoh_protocol::{ - core::Parameters, - 
network::{declare::subscriber::ext::SubscriberInfo, request}, -}; use super::{ handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - query::{QueryConsolidation, QueryTarget, Reply}, - sample::{Locality, Sample, SourceInfo}, + query::Reply, + sample::{Locality, Sample}, session::{Session, SessionRef, Undeclarable}, subscriber::{Subscriber, SubscriberInner}, Id, }; -#[zenoh_macros::unstable] -pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; - -#[zenoh_macros::unstable] -lazy_static::lazy_static!( - pub(crate) static ref KE_PREFIX_LIVELINESS: &'static keyexpr = unsafe { keyexpr::from_str_unchecked(PREFIX_LIVELINESS) }; -); - /// A structure with functions to declare a /// [`LivelinessToken`](LivelinessToken), query /// existing [`LivelinessTokens`](LivelinessToken) @@ -552,21 +539,18 @@ where { #[zenoh_macros::unstable] fn wait(self) -> ::To { + use super::subscriber::SubscriberKind; + let key_expr = self.key_expr?; let session = self.session; let (callback, handler) = self.handler.into_handler(); session - .declare_subscriber_inner( - &key_expr, - &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - Locality::default(), - callback, - &SubscriberInfo::DEFAULT, - ) + .declare_liveliness_subscriber_inner(&key_expr, None, Locality::default(), callback) .map(|sub_state| Subscriber { subscriber: SubscriberInner { session, state: sub_state, + kind: SubscriberKind::LivelinessSubscriber, undeclare_on_drop: true, }, handler, @@ -755,20 +739,7 @@ where fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.session - .query( - &self.key_expr?, - &Parameters::empty(), - &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - QueryTarget::DEFAULT, - QueryConsolidation::DEFAULT, - request::ext::QoSType::REQUEST.into(), - Locality::default(), - self.timeout, - None, - None, - SourceInfo::empty(), - callback, - ) + .liveliness_query(&self.key_expr?, self.timeout, callback) .map(|_| receiver) } } diff --git 
a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 8a4330676f..96cedc960f 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -53,7 +53,10 @@ use super::{ sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, session::{SessionRef, Undeclarable}, }; -use crate::{api::Id, net::primitives::Primitives}; +use crate::{ + api::{subscriber::SubscriberKind, Id}, + net::primitives::Primitives, +}; pub(crate) struct PublisherState { pub(crate) id: Id, @@ -621,11 +624,12 @@ impl Publisher<'_> { )), }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &self.key_expr.to_wire(&self.session), Some(data_info), payload.into(), + SubscriberKind::Subscriber, attachment, ); } diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 48e3674c85..e46d0a75ba 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -149,6 +149,11 @@ impl From for Result { } } +#[cfg(feature = "unstable")] +pub(crate) struct LivelinessQueryState { + pub(crate) callback: Callback<'static, Reply>, +} + pub(crate) struct QueryState { pub(crate) nb_final: usize, pub(crate) key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 1c4ae2086f..14e0899a55 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -31,7 +31,11 @@ use zenoh_collections::SingleOrVec; use zenoh_config::{unwrap_or_default, wrappers::ZenohId, Config, Notifier}; use zenoh_core::{zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, Wait}; #[cfg(feature = "unstable")] -use zenoh_protocol::network::{declare::SubscriberId, ext}; +use zenoh_protocol::network::{ + declare::{DeclareToken, SubscriberId, TokenId, UndeclareToken}, + ext, + interest::InterestId, +}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -46,8 +50,9 @@ use zenoh_protocol::{ DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, interest::{InterestMode, 
InterestOptions}, - request::{self, ext::TargetType, Request}, - AtomicRequestId, Interest, Mapping, Push, RequestId, Response, ResponseFinal, + request::{self, ext::TargetType}, + AtomicRequestId, DeclareFinal, Interest, Mapping, Push, Request, RequestId, Response, + ResponseFinal, }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, @@ -78,7 +83,7 @@ use super::{ queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::Selector, - subscriber::{SubscriberBuilder, SubscriberState}, + subscriber::{SubscriberBuilder, SubscriberKind, SubscriberState}, value::Value, Id, }; @@ -87,6 +92,7 @@ use super::{ liveliness::{Liveliness, LivelinessTokenState}, publisher::Publisher, publisher::{MatchingListenerState, MatchingStatus}, + query::LivelinessQueryState, sample::SourceInfo, }; #[cfg(feature = "unstable")] @@ -108,18 +114,26 @@ pub(crate) struct SessionState { pub(crate) primitives: Option>, // @TODO replace with MaybeUninit ?? 
pub(crate) expr_id_counter: AtomicExprId, // @TODO: manage rollover and uniqueness pub(crate) qid_counter: AtomicRequestId, + #[cfg(feature = "unstable")] + pub(crate) liveliness_qid_counter: AtomicRequestId, pub(crate) local_resources: HashMap, pub(crate) remote_resources: HashMap, #[cfg(feature = "unstable")] pub(crate) remote_subscribers: HashMap>, pub(crate) publishers: HashMap, + #[cfg(feature = "unstable")] + pub(crate) remote_tokens: HashMap>, + //pub(crate) publications: Vec, pub(crate) subscribers: HashMap>, + pub(crate) liveliness_subscribers: HashMap>, pub(crate) queryables: HashMap>, #[cfg(feature = "unstable")] pub(crate) tokens: HashMap>, #[cfg(feature = "unstable")] pub(crate) matching_listeners: HashMap>, pub(crate) queries: HashMap, + #[cfg(feature = "unstable")] + pub(crate) liveliness_queries: HashMap, pub(crate) aggregated_subscribers: Vec, pub(crate) aggregated_publishers: Vec, } @@ -133,18 +147,26 @@ impl SessionState { primitives: None, expr_id_counter: AtomicExprId::new(1), // Note: start at 1 because 0 is reserved for NO_RESOURCE qid_counter: AtomicRequestId::new(0), + #[cfg(feature = "unstable")] + liveliness_qid_counter: AtomicRequestId::new(0), local_resources: HashMap::new(), remote_resources: HashMap::new(), #[cfg(feature = "unstable")] remote_subscribers: HashMap::new(), publishers: HashMap::new(), + #[cfg(feature = "unstable")] + remote_tokens: HashMap::new(), + //publications: Vec::new(), subscribers: HashMap::new(), + liveliness_subscribers: HashMap::new(), queryables: HashMap::new(), #[cfg(feature = "unstable")] tokens: HashMap::new(), #[cfg(feature = "unstable")] matching_listeners: HashMap::new(), queries: HashMap::new(), + #[cfg(feature = "unstable")] + liveliness_queries: HashMap::new(), aggregated_subscribers, aggregated_publishers, } @@ -244,14 +266,32 @@ impl SessionState { self.remote_key_to_expr(key_expr) } } + + pub(crate) fn subscribers(&self, kind: SubscriberKind) -> &HashMap> { + match kind { + 
SubscriberKind::Subscriber => &self.subscribers, + SubscriberKind::LivelinessSubscriber => &self.liveliness_subscribers, + } + } + + pub(crate) fn subscribers_mut( + &mut self, + kind: SubscriberKind, + ) -> &mut HashMap> { + match kind { + SubscriberKind::Subscriber => &mut self.subscribers, + SubscriberKind::LivelinessSubscriber => &mut self.liveliness_subscribers, + } + } } impl fmt::Debug for SessionState { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, - "SessionState{{ subscribers: {} }}", - self.subscribers.len() + "SessionState{{ subscribers: {}, liveliness_subscribers: {} }}", + self.subscribers.len(), + self.liveliness_subscribers.len() ) } } @@ -259,7 +299,36 @@ impl fmt::Debug for SessionState { pub(crate) struct ResourceNode { pub(crate) key_expr: OwnedKeyExpr, pub(crate) subscribers: Vec>, + pub(crate) liveliness_subscribers: Vec>, } + +impl ResourceNode { + pub(crate) fn new(key_expr: OwnedKeyExpr) -> Self { + Self { + key_expr, + subscribers: Vec::new(), + liveliness_subscribers: Vec::new(), + } + } + + pub(crate) fn subscribers(&self, kind: SubscriberKind) -> &Vec> { + match kind { + SubscriberKind::Subscriber => &self.subscribers, + SubscriberKind::LivelinessSubscriber => &self.liveliness_subscribers, + } + } + + pub(crate) fn subscribers_mut( + &mut self, + kind: SubscriberKind, + ) -> &mut Vec> { + match kind { + SubscriberKind::Subscriber => &mut self.subscribers, + SubscriberKind::LivelinessSubscriber => &mut self.liveliness_subscribers, + } + } +} + pub(crate) enum Resource { Prefix { prefix: Box }, Node(ResourceNode), @@ -274,10 +343,7 @@ impl Resource { } } pub(crate) fn for_keyexpr(key_expr: OwnedKeyExpr) -> Self { - Self::Node(ResourceNode { - key_expr, - subscribers: Vec::new(), - }) + Self::Node(ResourceNode::new(key_expr)) } pub(crate) fn name(&self) -> &str { match self { @@ -881,15 +947,15 @@ impl Session { None => { let expr_id = state.expr_id_counter.fetch_add(1, Ordering::SeqCst); let mut res = 
Resource::new(Box::from(prefix)); - if let Resource::Node(ResourceNode { - key_expr, - subscribers, - .. - }) = &mut res - { - for sub in state.subscribers.values() { - if key_expr.intersects(&sub.key_expr) { - subscribers.push(sub.clone()); + if let Resource::Node(res_node) = &mut res { + for kind in [ + SubscriberKind::Subscriber, + SubscriberKind::LivelinessSubscriber, + ] { + for sub in state.subscribers(kind).values() { + if res_node.key_expr.intersects(&sub.key_expr) { + res_node.subscribers_mut(kind).push(sub.clone()); + } } } } @@ -1014,7 +1080,7 @@ impl Session { pub(crate) fn declare_subscriber_inner( &self, key_expr: &KeyExpr, - scope: &Option, + scope: Option<&KeyExpr>, origin: Locality, callback: Callback<'static, Sample>, info: &SubscriberInfo, @@ -1031,61 +1097,63 @@ impl Session { id, remote_id: id, key_expr: key_expr.clone().into_owned(), - scope: scope.clone().map(|e| e.into_owned()), + scope: scope.map(|e| e.clone().into_owned()), origin, callback, }; - #[cfg(not(feature = "unstable"))] let declared_sub = origin != Locality::SessionLocal; - #[cfg(feature = "unstable")] - let declared_sub = origin != Locality::SessionLocal - && !key_expr - .as_str() - .starts_with(crate::api::liveliness::PREFIX_LIVELINESS); - - let declared_sub = - declared_sub - .then(|| { - match state - .aggregated_subscribers - .iter() - .find(|s| s.includes(&key_expr)) - { - Some(join_sub) => { - if let Some(joined_sub) = state.subscribers.values().find(|s| { + + let declared_sub = declared_sub + .then(|| { + match state + .aggregated_subscribers + .iter() + .find(|s| s.includes(&key_expr)) + { + Some(join_sub) => { + if let Some(joined_sub) = state + .subscribers(SubscriberKind::Subscriber) + .values() + .find(|s| { s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }) { - sub_state.remote_id = joined_sub.remote_id; - None - } else { - Some(join_sub.clone().into()) - } + }) + { + sub_state.remote_id = joined_sub.remote_id; + None + } else { + 
Some(join_sub.clone().into()) } - None => { - if let Some(twin_sub) = state.subscribers.values().find(|s| { - s.origin != Locality::SessionLocal && s.key_expr == key_expr - }) { - sub_state.remote_id = twin_sub.remote_id; - None - } else { - Some(key_expr.clone()) - } + } + None => { + if let Some(twin_sub) = state + .subscribers(SubscriberKind::Subscriber) + .values() + .find(|s| s.origin != Locality::SessionLocal && s.key_expr == key_expr) + { + sub_state.remote_id = twin_sub.remote_id; + None + } else { + Some(key_expr.clone()) } } - }) - .flatten(); + } + }) + .flatten(); let sub_state = Arc::new(sub_state); - state.subscribers.insert(sub_state.id, sub_state.clone()); + state + .subscribers_mut(SubscriberKind::Subscriber) + .insert(sub_state.id, sub_state.clone()); for res in state .local_resources .values_mut() .filter_map(Resource::as_node_mut) { if key_expr.intersects(&res.key_expr) { - res.subscribers.push(sub_state.clone()); + res.subscribers_mut(SubscriberKind::Subscriber) + .push(sub_state.clone()); } } for res in state @@ -1094,7 +1162,8 @@ impl Session { .filter_map(Resource::as_node_mut) { if key_expr.intersects(&res.key_expr) { - res.subscribers.push(sub_state.clone()); + res.subscribers_mut(SubscriberKind::Subscriber) + .push(sub_state.clone()); } } @@ -1141,61 +1210,39 @@ impl Session { let state = zread!(self.state); self.update_status_up(&state, &key_expr) } - } else { - #[cfg(feature = "unstable")] - if key_expr - .as_str() - .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) - { - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - - primitives.send_interest(Interest { - id, - mode: InterestMode::CurrentFuture, - options: InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS, - wire_expr: Some(key_expr.to_wire(self).to_owned()), - ext_qos: network::ext::QoSType::DEFAULT, - ext_tstamp: None, - ext_nodeid: network::ext::NodeIdType::DEFAULT, - }); - } } Ok(sub_state) } - pub(crate) fn 
undeclare_subscriber_inner(&self, sid: Id) -> ZResult<()> { + pub(crate) fn undeclare_subscriber_inner(&self, sid: Id, kind: SubscriberKind) -> ZResult<()> { let mut state = zwrite!(self.state); - if let Some(sub_state) = state.subscribers.remove(&sid) { + if let Some(sub_state) = state + .subscribers_mut(SubscriberKind::Subscriber) + .remove(&sid) + { trace!("undeclare_subscriber({:?})", sub_state); for res in state .local_resources .values_mut() .filter_map(Resource::as_node_mut) { - res.subscribers.retain(|sub| sub.id != sub_state.id); + res.subscribers_mut(kind) + .retain(|sub| sub.id != sub_state.id); } for res in state .remote_resources .values_mut() .filter_map(Resource::as_node_mut) { - res.subscribers.retain(|sub| sub.id != sub_state.id); + res.subscribers_mut(kind) + .retain(|sub| sub.id != sub_state.id); } - #[cfg(not(feature = "unstable"))] - let send_forget = sub_state.origin != Locality::SessionLocal; - #[cfg(feature = "unstable")] - let send_forget = sub_state.origin != Locality::SessionLocal - && !sub_state - .key_expr - .as_str() - .starts_with(crate::api::liveliness::PREFIX_LIVELINESS); - if send_forget { + if sub_state.origin != Locality::SessionLocal && kind == SubscriberKind::Subscriber { // Note: there might be several Subscribers on the same KeyExpr. // Before calling forget_subscriber(key_expr), check if this was the last one. 
- if !state.subscribers.values().any(|s| { + if !state.subscribers(kind).values().any(|s| { s.origin != Locality::SessionLocal && s.remote_id == sub_state.remote_id }) { let primitives = state.primitives.as_ref().unwrap().clone(); @@ -1220,11 +1267,7 @@ impl Session { } } else { #[cfg(feature = "unstable")] - if sub_state - .key_expr - .as_str() - .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) - { + if kind == SubscriberKind::LivelinessSubscriber { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); @@ -1239,6 +1282,7 @@ impl Session { }); } } + Ok(()) } else { Err(zerror!("Unable to find subscriber").into()) @@ -1321,7 +1365,6 @@ impl Session { let mut state = zwrite!(self.state); tracing::trace!("declare_liveliness({:?})", key_expr); let id = self.runtime.next_id(); - let key_expr = KeyExpr::from(*crate::api::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { id, key_expr: key_expr.clone().into_owned(), @@ -1335,15 +1378,83 @@ impl Session { ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr: key_expr.to_wire(self).to_owned(), - ext_info: SubscriberInfo::DEFAULT, }), }); Ok(tok_state) } + #[cfg(feature = "unstable")] + pub(crate) fn declare_liveliness_subscriber_inner( + &self, + key_expr: &KeyExpr, + scope: Option<&KeyExpr>, + origin: Locality, + callback: Callback<'static, Sample>, + ) -> ZResult> { + let mut state = zwrite!(self.state); + trace!("declare_liveliness_subscriber({:?})", key_expr); + let id = self.runtime.next_id(); + let key_expr = match scope { + Some(scope) => scope / key_expr, + None => key_expr.clone(), + }; + + let sub_state = SubscriberState { + id, + remote_id: id, + key_expr: key_expr.clone().into_owned(), + scope: scope.map(|e| e.clone().into_owned()), + origin, + callback, + }; + + let 
sub_state = Arc::new(sub_state); + + state + .subscribers_mut(SubscriberKind::LivelinessSubscriber) + .insert(sub_state.id, sub_state.clone()); + + for res in state + .local_resources + .values_mut() + .filter_map(Resource::as_node_mut) + { + if key_expr.intersects(&res.key_expr) { + res.subscribers_mut(SubscriberKind::LivelinessSubscriber) + .push(sub_state.clone()); + } + } + + for res in state + .remote_resources + .values_mut() + .filter_map(Resource::as_node_mut) + { + if key_expr.intersects(&res.key_expr) { + res.subscribers_mut(SubscriberKind::LivelinessSubscriber) + .push(sub_state.clone()); + } + } + + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id, + mode: InterestMode::Future, + options: InterestOptions::KEYEXPRS + InterestOptions::TOKENS, + wire_expr: Some(key_expr.to_wire(self).to_owned()), + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + + Ok(sub_state) + } + #[zenoh_macros::unstable] pub(crate) fn undeclare_liveliness(&self, tid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); @@ -1360,7 +1471,7 @@ impl Session { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + body: DeclareBody::UndeclareToken(UndeclareToken { id: tok_state.id, ext_wire_expr: WireExprType::null(), }), @@ -1530,12 +1641,13 @@ impl Session { } } - pub(crate) fn handle_data( + pub(crate) fn execute_subscriber_callbacks( &self, local: bool, key_expr: &WireExpr, info: Option, payload: ZBuf, + kind: SubscriberKind, attachment: Option, ) { let mut callbacks = SingleOrVec::default(); @@ -1543,7 +1655,7 @@ impl Session { if key_expr.suffix.is_empty() { match state.get_res(&key_expr.scope, key_expr.mapping, local) { Some(Resource::Node(res)) => { - for sub in &res.subscribers { + for sub in res.subscribers(kind) { if sub.origin == 
Locality::Any || (local == (sub.origin == Locality::SessionLocal)) { @@ -1593,7 +1705,7 @@ impl Session { } else { match state.wireexpr_to_keyexpr(key_expr, local) { Ok(key_expr) => { - for sub in state.subscribers.values() { + for sub in state.subscribers(kind).values() { if (sub.origin == Locality::Any || (local == (sub.origin == Locality::SessionLocal))) && key_expr.intersects(&sub.key_expr) @@ -1783,6 +1895,61 @@ impl Session { Ok(()) } + #[cfg(feature = "unstable")] + pub(crate) fn liveliness_query( + &self, + key_expr: &KeyExpr<'_>, + timeout: Duration, + callback: Callback<'static, Reply>, + ) -> ZResult<()> { + tracing::trace!("liveliness.get({}, {:?})", key_expr, timeout); + let mut state = zwrite!(self.state); + let id = state.liveliness_qid_counter.fetch_add(1, Ordering::SeqCst); + let token = self.task_controller.get_cancellation_token(); + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { + let state = self.state.clone(); + let zid = self.runtime.zid(); + async move { + tokio::select! { + _ = tokio::time::sleep(timeout) => { + let mut state = zwrite!(state); + if let Some(query) = state.liveliness_queries.remove(&id) { + std::mem::drop(state); + tracing::debug!("Timeout on liveliness query {}! 
Send error and close.", id); + (query.callback)(Reply { + result: Err(Value::new("Timeout", Encoding::ZENOH_STRING).into()), + replier_id: Some(zid.into()), + }); + } + } + _ = token.cancelled() => {} + } + } + }); + + tracing::trace!("Register liveliness query {}", id); + let wexpr = key_expr.to_wire(self).to_owned(); + state + .liveliness_queries + .insert(id, LivelinessQueryState { callback }); + + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id, + mode: InterestMode::Current, + options: InterestOptions::KEYEXPRS + InterestOptions::TOKENS, + wire_expr: Some(wexpr.clone()), + ext_qos: request::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: request::ext::NodeIdType::DEFAULT, + }); + + Ok(()) + } + #[allow(clippy::too_many_arguments)] pub(crate) fn handle_query( &self, @@ -2025,18 +2192,21 @@ impl Primitives for Session { let state = &mut zwrite!(self.state); match state.remote_key_to_expr(&m.wire_expr) { Ok(key_expr) => { - let mut subs = Vec::new(); - for sub in state.subscribers.values() { - if key_expr.intersects(&sub.key_expr) { - subs.push(sub.clone()); + let mut res_node = ResourceNode::new(key_expr.clone().into()); + for kind in [ + SubscriberKind::Subscriber, + SubscriberKind::LivelinessSubscriber, + ] { + for sub in state.subscribers(kind).values() { + if key_expr.intersects(&sub.key_expr) { + res_node.subscribers_mut(kind).push(sub.clone()); + } } } - let res = Resource::Node(ResourceNode { - key_expr: key_expr.into(), - subscribers: subs, - }); - state.remote_resources.insert(m.id, res); + state + .remote_resources + .insert(m.id, Resource::Node(res_node)); } Err(e) => error!( "Received Resource for invalid wire_expr `{}`: {}", @@ -2059,14 +2229,6 @@ impl Primitives for Session { Ok(expr) => { state.remote_subscribers.insert(m.id, expr.clone()); self.update_status_up(&state, &expr); - - if expr - .as_str() - .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) - { - 
drop(state); - self.handle_data(false, &m.wire_expr, None, ZBuf::default(), None); - } } Err(err) => { tracing::error!( @@ -2084,24 +2246,6 @@ impl Primitives for Session { let mut state = zwrite!(self.state); if let Some(expr) = state.remote_subscribers.remove(&m.id) { self.update_status_down(&state, &expr); - - if expr - .as_str() - .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) - { - drop(state); - let data_info = DataInfo { - kind: SampleKind::Delete, - ..Default::default() - }; - self.handle_data( - false, - &expr.to_wire(self), - Some(data_info), - ZBuf::default(), - None, - ); - } } else { tracing::error!("Received Undeclare Subscriber for unkown id: {}", m.id); } @@ -2113,14 +2257,121 @@ impl Primitives for Session { zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { trace!("recv UndeclareQueryable {:?}", m.id); } - DeclareBody::DeclareToken(m) => { + zenoh_protocol::network::DeclareBody::DeclareToken(m) => { trace!("recv DeclareToken {:?}", m.id); + #[cfg(feature = "unstable")] + { + let mut state = zwrite!(self.state); + match state + .wireexpr_to_keyexpr(&m.wire_expr, false) + .map(|e| e.into_owned()) + { + Ok(key_expr) => { + if let Some(interest_id) = msg.interest_id { + if let Some(query) = state.liveliness_queries.get(&interest_id) { + let reply = Reply { + result: Ok(Sample { + key_expr, + payload: ZBytes::empty(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }), + replier_id: None, + }; + + (query.callback)(reply); + } + } else { + state.remote_tokens.insert(m.id, key_expr.clone()); + + drop(state); + + self.execute_subscriber_callbacks( + false, + &m.wire_expr, + None, + ZBuf::default(), + SubscriberKind::LivelinessSubscriber, + #[cfg(feature = "unstable")] + None, + ); + } + } + Err(err) => { + tracing::error!("Received DeclareToken for unkown 
wire_expr: {}", err) + } + } + } } - DeclareBody::UndeclareToken(m) => { + zenoh_protocol::network::DeclareBody::UndeclareToken(m) => { trace!("recv UndeclareToken {:?}", m.id); + #[cfg(feature = "unstable")] + { + let mut state = zwrite!(self.state); + if let Some(key_expr) = state.remote_tokens.remove(&m.id) { + drop(state); + + let data_info = DataInfo { + kind: SampleKind::Delete, + ..Default::default() + }; + + self.execute_subscriber_callbacks( + false, + &key_expr.to_wire(self), + Some(data_info), + ZBuf::default(), + SubscriberKind::LivelinessSubscriber, + #[cfg(feature = "unstable")] + None, + ); + } else if m.ext_wire_expr.wire_expr != WireExpr::empty() { + match state + .wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) + .map(|e| e.into_owned()) + { + Ok(key_expr) => { + drop(state); + + let data_info = DataInfo { + kind: SampleKind::Delete, + ..Default::default() + }; + + self.execute_subscriber_callbacks( + false, + &key_expr.to_wire(self), + Some(data_info), + ZBuf::default(), + SubscriberKind::LivelinessSubscriber, + #[cfg(feature = "unstable")] + None, + ); + } + Err(err) => { + tracing::error!( + "Received UndeclareToken for unkown wire_expr: {}", + err + ) + } + } + } + } } - DeclareBody::DeclareFinal(_) => { + DeclareBody::DeclareFinal(DeclareFinal) => { trace!("recv DeclareFinal {:?}", msg.interest_id); + + #[cfg(feature = "unstable")] + if let Some(interest_id) = msg.interest_id { + let mut state = zwrite!(self.state); + let _ = state.liveliness_queries.remove(&interest_id); + } } } } @@ -2137,11 +2388,12 @@ impl Primitives for Session { source_id: m.ext_sinfo.as_ref().map(|i| i.id.into()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - self.handle_data( + self.execute_subscriber_callbacks( false, &msg.wire_expr, Some(info), m.payload, + SubscriberKind::Subscriber, m.ext_attachment.map(Into::into), ) } @@ -2154,11 +2406,12 @@ impl Primitives for Session { source_id: m.ext_sinfo.as_ref().map(|i| i.id.into()), source_sn: 
m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - self.handle_data( + self.execute_subscriber_callbacks( false, &msg.wire_expr, Some(info), ZBuf::empty(), + SubscriberKind::Subscriber, m.ext_attachment.map(Into::into), ) } diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index c77dbc8791..493df4a54c 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -78,6 +78,7 @@ impl fmt::Debug for SubscriberState { pub(crate) struct SubscriberInner<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, + pub(crate) kind: SubscriberKind, pub(crate) undeclare_on_drop: bool, } @@ -146,7 +147,7 @@ impl Wait for SubscriberUndeclaration<'_> { self.subscriber.undeclare_on_drop = false; self.subscriber .session - .undeclare_subscriber_inner(self.subscriber.state.id) + .undeclare_subscriber_inner(self.subscriber.state.id, self.subscriber.kind) } } @@ -162,7 +163,9 @@ impl IntoFuture for SubscriberUndeclaration<'_> { impl Drop for SubscriberInner<'_> { fn drop(&mut self) { if self.undeclare_on_drop { - let _ = self.session.undeclare_subscriber_inner(self.state.id); + let _ = self + .session + .undeclare_subscriber_inner(self.state.id, self.kind); } } } @@ -377,7 +380,7 @@ where session .declare_subscriber_inner( &key_expr, - &None, + None, self.origin, callback, &SubscriberInfo { @@ -388,6 +391,7 @@ where subscriber: SubscriberInner { session, state: sub_state, + kind: SubscriberKind::Subscriber, undeclare_on_drop: true, }, handler: receiver, @@ -540,3 +544,9 @@ impl DerefMut for Subscriber<'_, Handler> { /// A [`Subscriber`] that provides data through a `flume` channel. 
pub type FlumeSubscriber<'a> = Subscriber<'a, flume::Receiver>; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum SubscriberKind { + Subscriber, + LivelinessSubscriber, +} diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 88f2abe9c1..4627a40654 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -287,10 +287,25 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::DeclareToken(m) => { - tracing::warn!("Received unsupported {m:?}") + declare_token( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + m.id, + &m.wire_expr, + msg.ext_nodeid.node_id, + msg.interest_id, + ); } zenoh_protocol::network::DeclareBody::UndeclareToken(m) => { - tracing::warn!("Received unsupported {m:?}") + undeclare_token( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + m.id, + &m.ext_wire_expr, + msg.ext_nodeid.node_id, + ); } zenoh_protocol::network::DeclareBody::DeclareFinal(_) => { if let Some(id) = msg.interest_id { diff --git a/zenoh/src/net/routing/dispatcher/interests.rs b/zenoh/src/net/routing/dispatcher/interests.rs index ab3764d14f..32724363f9 100644 --- a/zenoh/src/net/routing/dispatcher/interests.rs +++ b/zenoh/src/net/routing/dispatcher/interests.rs @@ -1,5 +1,5 @@ // -// Copyright (c) 2023 ZettaScale Technology +// Copyright (c) 2024 ZettaScale Technology // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License 2.0 which is available at diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs index 0f42ae2aee..dc17b91b6b 100644 --- a/zenoh/src/net/routing/dispatcher/mod.rs +++ b/zenoh/src/net/routing/dispatcher/mod.rs @@ -23,3 +23,4 @@ pub mod pubsub; pub mod queries; pub mod resource; pub mod tables; +pub mod token; diff --git a/zenoh/src/net/routing/dispatcher/queries.rs 
b/zenoh/src/net/routing/dispatcher/queries.rs index 56e4ce9335..f1163c829d 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -21,6 +21,8 @@ use async_trait::async_trait; use tokio_util::sync::CancellationToken; use zenoh_buffers::ZBuf; use zenoh_config::WhatAmI; +#[cfg(feature = "stats")] +use zenoh_protocol::zenoh::reply::ReplyBody; use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ @@ -31,7 +33,7 @@ use zenoh_protocol::{ }, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, - zenoh::{self, query::Consolidation, reply::ReplyBody, Put, Reply, RequestBody, ResponseBody}, + zenoh::{self, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; @@ -572,58 +574,10 @@ pub fn route_query( let queries_lock = zwrite!(tables_ref.queries_lock); let route = compute_final_route(&rtables, &route, face, &mut expr, &ext_target, query); - let local_replies = - rtables - .hat_code - .compute_local_replies(&rtables, &prefix, expr.suffix, face); - let zid = rtables.zid; - let timeout = ext_timeout.unwrap_or(rtables.queries_default_timeout); - drop(queries_lock); drop(rtables); - for (wexpr, payload) in local_replies { - let payload = ResponseBody::Reply(Reply { - consolidation: Consolidation::DEFAULT, // @TODO: handle Del case - ext_unknown: vec![], // @TODO: handle unknown extensions - payload: ReplyBody::Put(Put { - // @TODO: handle Del case - timestamp: None, // @TODO: handle timestamp - encoding: Encoding::empty(), // @TODO: handle encoding - ext_sinfo: None, // @TODO: handle source info - ext_attachment: None, // @TODO: expose it in the API - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], // @TODO: handle unknown extensions - payload, - }), - }); - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(face, tx, user, payload) - } else { - inc_res_stats!(face, tx, admin, payload) - } - - face.primitives - .clone() 
- .send_response(RoutingContext::with_expr( - Response { - rid: qid, - wire_expr: wexpr, - payload, - ext_qos: response::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid, - eid: 0, // 0 is reserved for routing core - }), - }, - expr.full_expr().to_string(), - )); - } - if route.is_empty() { tracing::debug!( "Send final reply {}:{} (no matching queryables or not master)", diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index e6b13dc2c8..a638c9a24f 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -57,6 +57,7 @@ pub(crate) struct SessionContext { pub(crate) remote_expr_id: Option, pub(crate) subs: Option, pub(crate) qabl: Option, + pub(crate) token: bool, pub(crate) in_interceptor_cache: Option>, pub(crate) e_interceptor_cache: Option>, } @@ -69,6 +70,7 @@ impl SessionContext { remote_expr_id: None, subs: None, qabl: None, + token: false, in_interceptor_cache: None, e_interceptor_cache: None, } diff --git a/zenoh/src/net/routing/dispatcher/token.rs b/zenoh/src/net/routing/dispatcher/token.rs new file mode 100644 index 0000000000..c563ce8802 --- /dev/null +++ b/zenoh/src/net/routing/dispatcher/token.rs @@ -0,0 +1,146 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::{ + core::WireExpr, + network::{ + declare::{common::ext, TokenId}, + interest::InterestId, + }, +}; + +use super::{ + face::FaceState, + tables::{NodeId, TablesLock}, +}; +use crate::net::routing::{hat::HatTrait, router::Resource}; + +pub(crate) fn declare_token( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: TokenId, + expr: &WireExpr, + node_id: NodeId, + interest_id: Option, +) { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare token {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_token(&mut wtables, face, id, &mut res, node_id, interest_id); + drop(wtables); + } + None => tracing::error!( + "{} Declare token {} for unknown scope {}!", + face, + id, + expr.scope + ), + } +} + +pub(crate) fn undeclare_token( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: TokenId, + expr: &ext::WireExprType, + node_id: NodeId, +) { + let (res, mut 
wtables) = if expr.wire_expr.is_empty() { + (None, zwrite!(tables.tables)) + } else { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.wire_expr.scope, expr.wire_expr.mapping) + .cloned() + { + Some(mut prefix) => { + match Resource::get_resource(&prefix, expr.wire_expr.suffix.as_ref()) { + Some(res) => { + drop(rtables); + (Some(res), zwrite!(tables.tables)) + } + None => { + // Here we create a Resource that will immediately be removed after treatment + // TODO this could be improved + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.wire_expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = Resource::make_resource( + &mut wtables, + &mut prefix, + expr.wire_expr.suffix.as_ref(), + ); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (Some(res), wtables) + } + } + } + None => { + tracing::error!( + "{} Undeclare liveliness token with unknown scope {}", + face, + expr.wire_expr.scope + ); + return; + } + } + }; + + if let Some(res) = hat_code.undeclare_token(&mut wtables, face, id, res, node_id) { + tracing::debug!("{} Undeclare token {} ({})", face, id, res.expr()); + } else { + tracing::error!("{} Undeclare unknown token {}", face, id); + } +} diff --git a/zenoh/src/net/routing/hat/client/interests.rs b/zenoh/src/net/routing/hat/client/interests.rs index 3757677893..57e380ee12 100644 --- a/zenoh/src/net/routing/hat/client/interests.rs +++ b/zenoh/src/net/routing/hat/client/interests.rs @@ -1,5 +1,5 @@ // -// Copyright (c) 2023 ZettaScale Technology +// Copyright (c) 2024 ZettaScale Technology // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License 2.0 which is available at @@ -23,10 +23,11 @@ use zenoh_protocol::{ }; use 
zenoh_sync::get_mut_unchecked; -use super::{face_hat, face_hat_mut, HatCode, HatFace}; +use super::{face_hat, face_hat_mut, token::declare_token_interest, HatCode, HatFace}; use crate::net::routing::{ dispatcher::{ face::{FaceState, InterestState}, + interests::{CurrentInterest, CurrentInterestCleanup}, resource::Resource, tables::{Tables, TablesLock}, }, @@ -74,16 +75,32 @@ impl HatInterestTrait for HatCode { fn declare_interest( &self, tables: &mut Tables, - _tables_ref: &Arc, + tables_ref: &Arc, face: &mut Arc, id: InterestId, res: Option<&mut Arc>, mode: InterestMode, options: InterestOptions, ) { + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } face_hat_mut!(face) .remote_interests .insert(id, (res.as_ref().map(|res| (*res).clone()), options)); + + let interest = Arc::new(CurrentInterest { + src_face: face.clone(), + src_interest_id: id, + }); + for dst_face in tables .faces .values_mut() @@ -98,6 +115,14 @@ impl HatInterestTrait for HatCode { finalized: mode == InterestMode::Future, }, ); + if mode.current() && options.tokens() { + let dst_face_mut = get_mut_unchecked(dst_face); + let cancellation_token = dst_face_mut.task_controller.get_cancellation_token(); + dst_face_mut + .pending_current_interests + .insert(id, (interest.clone(), cancellation_token)); + CurrentInterestCleanup::spawn_interest_clean_up_task(dst_face, tables_ref, id); + } let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); dst_face.primitives.send_interest(RoutingContext::with_expr( Interest { @@ -114,13 +139,34 @@ impl HatInterestTrait for HatCode { } if mode.current() { - face.primitives.send_declare(RoutingContext::new(Declare { - interest_id: Some(id), - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareFinal(DeclareFinal), - })); + if options.tokens() { + if let Some(interest) = 
Arc::into_inner(interest) { + tracing::debug!( + "Propagate DeclareFinal {}:{}", + interest.src_face, + interest.src_interest_id + ); + interest + .src_face + .primitives + .clone() + .send_declare(RoutingContext::new(Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } + } else { + face.primitives.send_declare(RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } } } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index f41b36e584..0cbdd6d4bc 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -23,9 +23,10 @@ use std::{ sync::{atomic::AtomicU32, Arc}, }; +use token::{token_new_face, undeclare_client_token}; use zenoh_config::WhatAmI; use zenoh_protocol::network::{ - declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId, TokenId}, interest::{InterestId, InterestOptions}, Oam, }; @@ -56,6 +57,7 @@ use crate::net::{ mod interests; mod pubsub; mod queries; +mod token; macro_rules! 
face_hat { ($f:expr) => { @@ -105,6 +107,7 @@ impl HatBaseTrait for HatCode { interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); + token_new_face(tables, &mut face.state); Ok(()) } @@ -118,18 +121,27 @@ impl HatBaseTrait for HatCode { interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); + token_new_face(tables, &mut face.state); Ok(()) } fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; - face_hat_mut!(face).remote_interests.clear(); - face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).local_qabls.clear(); + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); - let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -142,13 +154,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); @@ -170,13 +176,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_id, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); 
undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); @@ -196,6 +196,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -296,6 +301,8 @@ struct HatFace { remote_subs: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, + local_tokens: HashMap, TokenId>, + remote_tokens: HashMap>, } impl HatFace { @@ -307,6 +314,8 @@ impl HatFace { remote_subs: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 7ba6005e5a..41dae88cdf 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -37,7 +37,7 @@ use crate::{ }, hat::{HatPubSubTrait, Sources}, router::{update_data_routes_from, RoutesIndexes}, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }, }; @@ -49,8 +49,7 @@ fn propagate_simple_subscription_to( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - if (src_face.id != dst_face.id - || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) + if src_face.id != dst_face.id && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { @@ -201,22 +200,20 @@ pub(super) fn undeclare_client_subscription( } if client_subs.len() == 1 { let face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: 
ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index bc1fddbb3b..cebc04cd2f 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -18,14 +18,13 @@ use std::{ }; use ordered_float::OrderedFloat; -use zenoh_buffers::ZBuf; use zenoh_protocol::{ core::{ key_expr::{ include::{Includer, DEFAULT_INCLUDER}, OwnedKeyExpr, }, - WhatAmI, WireExpr, + WhatAmI, }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, @@ -43,7 +42,7 @@ use crate::net::routing::{ }, hat::{HatQueriesTrait, Sources}, router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }; #[inline] @@ -86,11 +85,17 @@ fn propagate_simple_queryable( for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); let current = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) && (current.is_none() || current.unwrap().1 != info) - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client) + && src_face + .as_ref() + .map(|src_face| { + src_face.whatami == WhatAmI::Client || 
dst_face.whatami == WhatAmI::Client + }) + .unwrap_or(true) { let id = current .map(|c| c.0) @@ -360,44 +365,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } diff --git a/zenoh/src/net/routing/hat/client/token.rs b/zenoh/src/net/routing/hat/client/token.rs new file mode 100644 index 0000000000..3b52bad36a --- /dev/null +++ b/zenoh/src/net/routing/hat/client/token.rs @@ -0,0 +1,383 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode}, + Declare, DeclareBody, DeclareToken, UndeclareToken, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{face_hat, face_hat_mut, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn propagate_simple_token_to( + _tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &mut Arc, +) { + if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } +} + +fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face); + } +} + +fn register_client_token( + _tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + 
get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + interest_id: Option, +) { + register_client_token(tables, face, id, res); + + propagate_simple_token(tables, res, face); + + let wire_expr = Resource::decl_key(res, face); + if let Some(interest_id) = interest_id { + if let Some((interest, _)) = face.pending_current_interests.get(&interest_id) { + interest + .src_face + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + )) + } + } +} + +#[inline] +fn client_tokens(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { + for face in tables.faces.values_mut() { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } else if face_hat!(face) + .remote_interests + .values() + .any(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. 
+ face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + )); + } + } +} + +pub(super) fn undeclare_client_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + if !face_hat_mut!(face) + .remote_tokens + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).token = false; + } + + let mut client_tokens = client_tokens(res); + if client_tokens.is_empty() { + propagate_forget_simple_token(tables, res); + } + if client_tokens.len() == 1 { + let face = &mut client_tokens[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } + } +} + +fn forget_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { + undeclare_client_token(tables, face, &mut res); + Some(res) + } else if let Some(mut res) = res { + undeclare_client_token(tables, face, &mut res); + Some(res) + } else { + None + } +} + +pub(super) fn token_new_face(tables: &mut Tables, face: &mut Arc) { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + propagate_simple_token_to(tables, 
face, token, &mut src_face.clone()); + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + face_hat!(src_face) + .remote_tokens + .values() + .any(|token| token.context.is_some() && token.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + )); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + if token.context.is_some() && token.matches(res) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + )) + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + 
face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + _node_id: NodeId, + interest_id: Option, + ) { + declare_client_token(tables, face, id, res, interest_id); + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + _node_id: NodeId, + ) -> Option> { + forget_client_token(tables, face, id, res) + } +} diff --git a/zenoh/src/net/routing/hat/linkstate_peer/interests.rs b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs index 413f06f67b..40bfb49780 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/interests.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs @@ -1,5 +1,5 @@ // -// Copyright (c) 2023 ZettaScale Technology +// Copyright (c) 2024 ZettaScale Technology // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License 2.0 which is available at @@ -21,7 +21,8 @@ use zenoh_protocol::network::{ use zenoh_sync::get_mut_unchecked; use super::{ - face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, HatCode, HatFace, + face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, }; use crate::net::routing::{ dispatcher::{ @@ -64,6 +65,16 @@ impl HatInterestTrait for HatCode { options.aggregate(), ) } + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + 
options.aggregate(), + ) + } if mode.future() { face_hat_mut!(face) .remote_interests diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index bc10eaee8a..500ac29510 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -24,6 +24,7 @@ use std::{ time::Duration, }; +use token::{token_remove_node, undeclare_client_token}; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, @@ -67,6 +68,7 @@ mod interests; mod network; mod pubsub; mod queries; +mod token; macro_rules! hat { ($t:expr) => { @@ -116,6 +118,7 @@ use face_hat_mut; struct HatTables { peer_subs: HashSet>, + peer_tokens: HashSet>, peer_qabls: HashSet>, peers_net: Option, peers_trees_task: Option, @@ -134,6 +137,7 @@ impl HatTables { fn new() -> Self { Self { peer_subs: HashSet::new(), + peer_tokens: HashSet::new(), peer_qabls: HashSet::new(), peers_net: None, peers_trees_task: None, @@ -157,6 +161,7 @@ impl HatTables { tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_childs); queries::queries_tree_change(&mut tables, &new_childs); + token::token_tree_change(&mut tables, &new_childs); tracing::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; @@ -250,12 +255,20 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; - face_hat_mut!(face).remote_interests.clear(); - face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).local_qabls.clear(); + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + 
hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); - let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -268,13 +281,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); @@ -296,13 +303,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); @@ -322,6 +323,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -377,6 +383,7 @@ impl HatBaseTrait for HatCode { for (_, removed_node) in changes.removed_nodes { pubsub_remove_node(tables, &removed_node.zid); queries_remove_node(tables, &removed_node.zid); + token_remove_node(tables, &removed_node.zid); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -420,6 +427,7 @@ impl HatBaseTrait for HatCode { { pubsub_remove_node(tables, &removed_node.zid); queries_remove_node(tables, &removed_node.zid); + token_remove_node(tables, &removed_node.zid); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -463,17 +471,17 @@ impl HatBaseTrait for HatCode { } struct HatContext { - router_subs: HashSet, peer_subs: HashSet, 
peer_qabls: HashMap, + peer_tokens: HashSet, } impl HatContext { fn new() -> Self { Self { - router_subs: HashSet::new(), peer_subs: HashSet::new(), peer_qabls: HashMap::new(), + peer_tokens: HashSet::new(), } } } @@ -484,6 +492,8 @@ struct HatFace { remote_interests: HashMap>, InterestOptions)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + local_tokens: HashMap, SubscriberId>, + remote_tokens: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -498,6 +508,8 @@ impl HatFace { remote_subs: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index a1ff061602..49bd026a31 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -45,7 +45,7 @@ use crate::net::routing::{ }, hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }; #[inline] @@ -62,7 +62,10 @@ fn send_sourced_subscription_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let key_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -97,7 +100,7 @@ fn propagate_simple_subscription_to( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) + if (src_face.id != dst_face.id) && !face_hat!(dst_face).local_subs.contains_key(res) && dst_face.whatami == WhatAmI::Client { @@ -230,10 +233,8 @@ fn register_peer_subscription( 
propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer); } - if tables.whatami == WhatAmI::Peer { - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); - } + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); } fn declare_peer_subscription( @@ -329,7 +330,10 @@ fn send_forget_sourced_subscription_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let wire_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -447,9 +451,7 @@ fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe .peer_subs .retain(|sub| !Arc::ptr_eq(sub, res)); - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res); - } + propagate_forget_simple_subscription(tables, res); } } @@ -492,7 +494,7 @@ pub(super) fn undeclare_client_subscription( if client_subs.len() == 1 && !peer_subs { let mut face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -571,10 +573,16 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto) { } pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec]) { + let net = match hat!(tables).peers_net.as_ref() { + Some(net) => net, + None => { + tracing::error!("Error accessing peers_net in pubsub_tree_change!"); + return; + } + }; // propagate subs to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = 
hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 16ed7cc7ef..36fc03c03d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -19,14 +19,13 @@ use std::{ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use zenoh_buffers::ZBuf; use zenoh_protocol::{ core::{ key_expr::{ include::{Includer, DEFAULT_INCLUDER}, OwnedKeyExpr, }, - WhatAmI, WireExpr, ZenohIdProto, + WhatAmI, ZenohIdProto, }, network::{ declare::{ @@ -51,7 +50,7 @@ use crate::net::routing::{ }, hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }; #[inline] @@ -134,7 +133,11 @@ fn send_sourced_queryable_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { + if src_face + .as_ref() + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let key_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -170,7 +173,10 @@ fn propagate_simple_queryable( for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); let current = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client && face_hat!(dst_face) @@ -259,10 +265,8 @@ fn register_peer_queryable( propagate_sourced_queryable(tables, res, qabl_info, face.as_deref_mut(), &peer); } - if 
tables.whatami == WhatAmI::Peer { - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); - } + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); } fn declare_peer_queryable( @@ -352,7 +356,10 @@ fn send_forget_sourced_queryable_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let wire_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -470,9 +477,7 @@ fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: .peer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res); - } + propagate_forget_simple_queryable(tables, res); } } @@ -602,10 +607,16 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto) { } pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec]) { + let net = match hat!(tables).peers_net.as_ref() { + Some(net) => net, + None => { + tracing::error!("Error accessing peers_net in queries_tree_change!"); + return; + } + }; // propagate qabls to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; @@ -939,48 +950,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami 
== WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs b/zenoh/src/net/routing/hat/linkstate_peer/token.rs new file mode 100644 index 0000000000..0085d8deb0 --- /dev/null +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -0,0 +1,717 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use petgraph::graph::NodeIndex; +use zenoh_protocol::{ + core::{WhatAmI, ZenohIdProto}, + network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareToken, UndeclareToken, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat, face_hat_mut, get_peer, hat, hat_mut, network::Network, res_hat, res_hat_mut, + HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn send_sourced_token_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: NodeId, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let key_expr = Resource::decl_key(res, &mut someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareToken(DeclareToken { + id: 0, // Sourced tokens do not use ids + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + } + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +#[inline] +fn propagate_simple_token_to( + _tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + _src_face: &mut Arc, +) { + if !face_hat!(dst_face).local_tokens.contains_key(res) && dst_face.whatami == WhatAmI::Client { + if dst_face.whatami 
!= WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_tokens.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + } + } + } +} + +fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face); + } +} + +fn propagate_sourced_token( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohIdProto, +) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_token_to_net_childs( + tables, + net, + 
&net.trees[tree_sid.index()].childs, + res, + src_face, + tree_sid.index() as NodeId, + ); + } else { + tracing::trace!( + "Propagating token {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => tracing::error!( + "Error propagating token {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_peer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: ZenohIdProto, +) { + if !res_hat!(res).peer_tokens.contains(&peer) { + // Register peer liveliness + { + res_hat_mut!(res).peer_tokens.insert(peer); + hat_mut!(tables).peer_tokens.insert(res.clone()); + } + + // Propagate liveliness to peers + propagate_sourced_token(tables, res, Some(face), &peer); + } + + // Propagate liveliness to clients + propagate_simple_token(tables, res, face); +} + +fn declare_peer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: ZenohIdProto, +) { + register_peer_token(tables, face, res, peer); +} + +fn register_client_token( + _tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + register_client_token(tables, face, id, res); + let zid = tables.zid; + register_peer_token(tables, face, res, zid); +} + +#[inline] +fn remote_peer_tokens(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_tokens + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_tokens(res: 
&Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.token) +} + +#[inline] +fn send_forget_sourced_token_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: Option, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let wire_expr = Resource::decl_key(res, &mut someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: 0, // Sourced tokens do not use ids + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + } + } + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && 
(remote_client_tokens(&m, &face) || remote_peer_tokens(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } + } +} + +fn propagate_forget_sourced_token( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohIdProto, +) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_token_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + Some(tree_sid.index() as NodeId), + ); + } else { + tracing::trace!( + "Propagating forget token {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => tracing::error!( + "Error propagating forget token {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_peer_token(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { + res_hat_mut!(res).peer_tokens.retain(|token| token != peer); + + if res_hat!(res).peer_tokens.is_empty() { + hat_mut!(tables) + .peer_tokens + .retain(|token| !Arc::ptr_eq(token, res)); + + propagate_forget_simple_token(tables, res); + } +} + +fn undeclare_peer_token( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohIdProto, +) { + if res_hat!(res).peer_tokens.contains(peer) { + unregister_peer_token(tables, res, peer); + propagate_forget_sourced_token(tables, res, face, peer); + } +} + +fn forget_peer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: &ZenohIdProto, +) { + undeclare_peer_token(tables, Some(face), res, peer); +} + +pub(super) fn 
undeclare_client_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + if !face_hat_mut!(face) + .remote_tokens + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).token = false; + } + + let mut client_tokens = client_tokens(res); + let peer_tokens = remote_peer_tokens(tables, res); + if client_tokens.is_empty() { + undeclare_peer_token(tables, None, res, &tables.zid.clone()); + } + + if client_tokens.len() == 1 && !peer_tokens { + let mut face = &mut client_tokens[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_tokens(&m, face) + || remote_peer_tokens(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } + } + } + } +} + +fn forget_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { + undeclare_client_token(tables, face, &mut res); + Some(res) + } else { + None + } +} + +pub(super) fn token_remove_node(tables: &mut 
Tables, node: &ZenohIdProto) { + for mut res in hat!(tables) + .peer_tokens + .iter() + .filter(|res| res_hat!(res).peer_tokens.contains(node)) + .cloned() + .collect::>>() + { + unregister_peer_token(tables, &mut res, node); + Resource::clean(&mut res) + } +} + +pub(super) fn token_tree_change(tables: &mut Tables, new_childs: &[Vec]) { + let net = match hat!(tables).peers_net.as_ref() { + Some(net) => net, + None => { + tracing::error!("Error accessing peers_net in token_tree_change!"); + return; + } + }; + // propagate tokens to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let tokens_res = &hat!(tables).peer_tokens; + + for res in tokens_res { + let tokens = &res_hat!(res).peer_tokens; + for token in tokens { + if *token == tree_id { + send_sourced_token_to_net_childs( + tables, + net, + tree_childs, + res, + None, + tree_sid as NodeId, + ); + } + } + } + } + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_tokens.iter().any(|token| { + token.context.is_some() + && token.matches(res) + && (remote_client_tokens(token, face) || remote_peer_tokens(tables, token)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: 
ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + )); + } + } else { + for token in &hat!(tables).peer_tokens { + if token.context.is_some() + && token.matches(res) + && (remote_client_tokens(token, face) || remote_peer_tokens(tables, token)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } else { + for token in &hat!(tables).peer_tokens { + if token.context.is_some() + && (remote_client_tokens(token, face) || remote_peer_tokens(tables, token)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + node_id: NodeId, + _interest_id: Option, + ) { + if face.whatami != WhatAmI::Client { + if let Some(peer) = get_peer(tables, face, node_id) { + declare_peer_token(tables, face, res, peer) + } + } else { + declare_client_token(tables, face, id, res) + } + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: 
TokenId, + res: Option>, + node_id: NodeId, + ) -> Option> { + if face.whatami != WhatAmI::Client { + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_token(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None + } + } else { + forget_client_token(tables, face, id) + } + } +} diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index b30e6e9277..f2175474d4 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -19,14 +19,13 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) use std::{any::Any, sync::Arc}; -use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ - core::{WireExpr, ZenohIdProto}, + core::ZenohIdProto, network::{ declare::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, - SubscriberId, + SubscriberId, TokenId, }, interest::{InterestId, InterestMode, InterestOptions}, Oam, @@ -73,7 +72,7 @@ impl Sources { } pub(crate) trait HatTrait: - HatBaseTrait + HatInterestTrait + HatPubSubTrait + HatQueriesTrait + HatBaseTrait + HatInterestTrait + HatPubSubTrait + HatQueriesTrait + HatTokenTrait { } @@ -222,14 +221,6 @@ pub(crate) trait HatQueriesTrait { ) -> Arc; fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes; - - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)>; } pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box { @@ -246,6 +237,27 @@ pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box, + id: TokenId, + res: &mut Arc, + node_id: NodeId, + interest_id: Option, + ); + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + node_id: NodeId, + ) -> Option>; +} + trait CurrentFutureTrait { fn future(&self) -> bool; fn current(&self) -> bool; diff 
--git a/zenoh/src/net/routing/hat/p2p_peer/interests.rs b/zenoh/src/net/routing/hat/p2p_peer/interests.rs index 0b058fb4b1..4fe8936cc7 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/interests.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/interests.rs @@ -1,5 +1,5 @@ // -// Copyright (c) 2023 ZettaScale Technology +// Copyright (c) 2024 ZettaScale Technology // // This program and the accompanying materials are made available under the // terms of the Eclipse Public License 2.0 which is available at @@ -24,8 +24,8 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; use super::{ - face_hat, face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, HatCode, - HatFace, + face_hat, face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, }; use crate::net::routing::{ dispatcher::{ @@ -107,6 +107,16 @@ impl HatInterestTrait for HatCode { options.aggregate(), ) } + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } face_hat_mut!(face) .remote_interests .insert(id, (res.as_ref().map(|res| (*res).clone()), options)); diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 38ee54e0f6..eab2f393de 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -23,6 +23,7 @@ use std::{ sync::{atomic::AtomicU32, Arc}, }; +use token::{token_new_face, undeclare_client_token}; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, @@ -30,7 +31,7 @@ use zenoh_protocol::{ declare::{ ext::{NodeIdType, QoSType}, queryable::ext::QueryableInfoType, - QueryableId, SubscriberId, + QueryableId, SubscriberId, TokenId, }, interest::{InterestId, InterestOptions}, oam::id::OAM_LINKSTATE, @@ -69,6 +70,7 @@ mod gossip; mod interests; mod pubsub; 
mod queries; +mod token; macro_rules! hat_mut { ($t:expr) => { @@ -150,6 +152,7 @@ impl HatBaseTrait for HatCode { interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); + token_new_face(tables, &mut face.state); Ok(()) } @@ -179,6 +182,7 @@ impl HatBaseTrait for HatCode { interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); + token_new_face(tables, &mut face.state); if face.state.whatami == WhatAmI::Peer { face.state @@ -197,12 +201,20 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; - face_hat_mut!(face).remote_interests.clear(); - face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).local_qabls.clear(); + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); - let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -215,13 +227,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); @@ -243,13 +249,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_id, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for 
(_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); @@ -269,6 +269,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -394,6 +399,8 @@ struct HatFace { remote_interests: HashMap>, InterestOptions)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + local_tokens: HashMap, TokenId>, + remote_tokens: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -405,6 +412,8 @@ impl HatFace { remote_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 9cc2f05bf6..bc0a6f7de2 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -40,7 +40,7 @@ use crate::{ }, hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, router::{update_data_routes_from, RoutesIndexes}, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }, }; @@ -52,8 +52,7 @@ fn propagate_simple_subscription_to( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - if (src_face.id != dst_face.id - || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) + if (src_face.id != dst_face.id) && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { @@ -277,47 +276,45 @@ pub(super) fn undeclare_client_subscription( if client_subs.len() == 1 { let mut face = &mut 
client_subs[0]; - if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } - for res in face_hat!(face) - .local_subs - .keys() - .cloned() - .collect::>>() - { - if !res.context().matches.iter().any(|m| { - m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, face)) - }) { - if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: 
DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index cafe65b8c7..6a8ebbc8e6 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -18,14 +18,13 @@ use std::{ }; use ordered_float::OrderedFloat; -use zenoh_buffers::ZBuf; use zenoh_protocol::{ core::{ key_expr::{ include::{Includer, DEFAULT_INCLUDER}, OwnedKeyExpr, }, - WhatAmI, WireExpr, + WhatAmI, }, network::{ declare::{ @@ -46,7 +45,7 @@ use crate::net::routing::{ }, hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::{update_query_routes_from, RoutesIndexes}, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }; #[inline] @@ -89,7 +88,10 @@ fn propagate_simple_queryable_to( ) { let info = local_qabl_info(tables, res, dst_face); let current = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) && (current.is_none() || current.unwrap().1 != info) && (dst_face.whatami != WhatAmI::Client || face_hat!(dst_face) @@ -599,44 +601,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - 
let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs new file mode 100644 index 0000000000..65c351c812 --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -0,0 +1,482 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareToken, UndeclareToken, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{face_hat, face_hat_mut, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn propagate_simple_token_to( + _tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &mut Arc, +) { + if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) + { + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_tokens.contains_key(res) { + let id = 
face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + } + } + } +} + +fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face); + } +} + +fn register_client_token( + _tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + register_client_token(tables, face, id, res); + + propagate_simple_token(tables, res, face); +} + +#[inline] +fn client_tokens(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.token) +} + +fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut 
face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } else if face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() + }) { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_tokens(&m, &face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } else if face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() + && r.as_ref().map(|r| r.matches(&res)).unwrap_or(true) + && !o.aggregate() + }) { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. 
+ face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(&res, "", face.id), + }, + }), + }, + res.expr(), + )); + } + } + } + } +} + +pub(super) fn undeclare_client_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + if !face_hat_mut!(face) + .remote_tokens + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).token = false; + } + + let mut client_tokens = client_tokens(res); + if client_tokens.is_empty() { + propagate_forget_simple_token(tables, res); + } + + if client_tokens.len() == 1 { + let mut face = &mut client_tokens[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_tokens(&m, face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + 
} + } + } + } + } +} + +fn forget_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { + undeclare_client_token(tables, face, &mut res); + Some(res) + } else if let Some(mut res) = res { + undeclare_client_token(tables, face, &mut res); + Some(res) + } else { + None + } +} + +pub(super) fn token_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for mut src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face.clone()).remote_tokens.values() { + propagate_simple_token_to(tables, face, token, &mut src_face); + } + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + face_hat!(src_face) + .remote_tokens + .values() + .any(|token| token.context.is_some() && token.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + )); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + if token.context.is_some() && token.matches(res) { + let id = if mode.future() { + let id = 
face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + _node_id: NodeId, + _interest_id: Option, + ) { + declare_client_token(tables, face, id, res) + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + _node_id: NodeId, + ) -> Option> { + forget_client_token(tables, face, id, res) + } +} diff --git a/zenoh/src/net/routing/hat/router/interests.rs b/zenoh/src/net/routing/hat/router/interests.rs index a12201d7ad..f9f289bfa7 100644 --- a/zenoh/src/net/routing/hat/router/interests.rs +++ b/zenoh/src/net/routing/hat/router/interests.rs @@ -1,5 +1,5 @@ // -// Copyright (c) 2023 ZettaScale Technology +// Copyright (c) 2024 ZettaScale Technology // // This program and the accompanying materials 
are made available under the // terms of the Eclipse Public License 2.0 which is available at @@ -24,7 +24,8 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; use super::{ - face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, HatCode, HatFace, + face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, }; use crate::net::routing::{ dispatcher::{ @@ -74,6 +75,16 @@ impl HatInterestTrait for HatCode { options.aggregate(), ) } + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + ) + } if mode.future() { face_hat_mut!(face) .remote_interests diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index c3f51eadba..94352ea77d 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -25,12 +25,13 @@ use std::{ time::Duration, }; +use token::{token_linkstate_change, token_remove_node, undeclare_client_token}; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, core::ZenohIdProto, network::{ - declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId, TokenId}, interest::{InterestId, InterestOptions}, oam::id::OAM_LINKSTATE, Oam, @@ -68,6 +69,7 @@ mod interests; mod network; mod pubsub; mod queries; +mod token; macro_rules! 
hat { ($t:expr) => { @@ -118,6 +120,8 @@ use face_hat_mut; struct HatTables { router_subs: HashSet>, peer_subs: HashSet>, + router_tokens: HashSet>, + peer_tokens: HashSet>, router_qabls: HashSet>, peer_qabls: HashSet>, routers_net: Option, @@ -148,6 +152,8 @@ impl HatTables { peer_subs: HashSet::new(), router_qabls: HashSet::new(), peer_qabls: HashSet::new(), + router_tokens: HashSet::new(), + peer_tokens: HashSet::new(), routers_net: None, peers_net: None, shared_nodes: vec![], @@ -278,6 +284,7 @@ impl HatTables { tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); queries::queries_tree_change(&mut tables, &new_childs, net_type); + token::token_tree_change(&mut tables, &new_childs, net_type); tracing::trace!("Computations completed"); match net_type { @@ -419,12 +426,20 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; - face_hat_mut!(face).remote_interests.clear(); - face_hat_mut!(face).local_subs.clear(); - face_hat_mut!(face).local_qabls.clear(); + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); - let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -437,13 +452,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); 
undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); @@ -465,13 +474,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_, mut res) in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); @@ -491,6 +494,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -550,6 +558,7 @@ impl HatBaseTrait for HatCode { { pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + token_remove_node(tables, &removed_node.zid, WhatAmI::Router); } if hat!(tables).full_net(WhatAmI::Peer) { @@ -577,6 +586,7 @@ impl HatBaseTrait for HatCode { &removed_node.zid, WhatAmI::Peer, ); + token_remove_node(tables, &removed_node.zid, WhatAmI::Peer); } hat_mut!(tables).shared_nodes = shared_nodes( @@ -598,6 +608,11 @@ impl HatBaseTrait for HatCode { &updated_node.zid, &updated_node.links, ); + token_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); } } } @@ -657,6 +672,7 @@ impl HatBaseTrait for HatCode { { pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + token_remove_node(tables, &removed_node.zid, WhatAmI::Router); } if hat!(tables).full_net(WhatAmI::Peer) { @@ -679,6 +695,7 @@ impl HatBaseTrait for HatCode { { pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + token_remove_node(tables, &removed_node.zid, WhatAmI::Peer); } 
hat_mut!(tables).shared_nodes = shared_nodes( @@ -766,6 +783,8 @@ struct HatContext { peer_subs: HashSet, router_qabls: HashMap, peer_qabls: HashMap, + router_tokens: HashSet, + peer_tokens: HashSet, } impl HatContext { @@ -775,6 +794,8 @@ impl HatContext { peer_subs: HashSet::new(), router_qabls: HashMap::new(), peer_qabls: HashMap::new(), + router_tokens: HashSet::new(), + peer_tokens: HashSet::new(), } } } @@ -787,6 +808,8 @@ struct HatFace { remote_subs: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, + local_tokens: HashMap, TokenId>, + remote_tokens: HashMap>, } impl HatFace { @@ -799,6 +822,8 @@ impl HatFace { remote_subs: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index e13aade332..5c1ced2405 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -45,7 +45,7 @@ use crate::net::routing::{ }, hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }; #[inline] @@ -62,7 +62,10 @@ fn send_sourced_subscription_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let key_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -98,8 +101,7 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, full_peer_net: bool, ) { - if (src_face.id != dst_face.id - || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) + if src_face.id != dst_face.id && 
!face_hat!(dst_face).local_subs.contains_key(res) && if full_peer_net { dst_face.whatami == WhatAmI::Client @@ -368,7 +370,10 @@ fn send_forget_sourced_subscription_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let wire_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -622,51 +627,49 @@ pub(super) fn undeclare_client_subscription( if client_subs.len() == 1 && !router_subs && !peer_subs { let mut face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } - for res in face_hat!(face) - .local_subs - .keys() - .cloned() - .collect::>>() - { - if !res.context().matches.iter().any(|m| { - m.upgrade().is_some_and(|m| { - m.context.is_some() - && (remote_client_subs(&m, face) - || remote_peer_subs(tables, &m) - || remote_router_subs(tables, &m)) - }) - }) { - if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + 
face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, face) + || remote_peer_subs(tables, &m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); } } } @@ -731,10 +734,16 @@ pub(super) fn pubsub_tree_change( new_childs: &[Vec], net_type: WhatAmI, ) { + let net = match hat!(tables).get_net(net_type) { + Some(net) => net, + None => { + tracing::error!("Error accessing net in pubsub_tree_change!"); + return; + } + }; // propagate subs to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 9df58a32a5..5a89757c46 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -19,14 +19,13 @@ use std::{ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use zenoh_buffers::ZBuf; use zenoh_protocol::{ core::{ key_expr::{ 
include::{Includer, DEFAULT_INCLUDER}, OwnedKeyExpr, }, - WhatAmI, WireExpr, ZenohIdProto, + WhatAmI, ZenohIdProto, }, network::{ declare::{ @@ -51,7 +50,7 @@ use crate::net::routing::{ }, hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, + RoutingContext, }; #[inline] @@ -202,7 +201,11 @@ fn send_sourced_queryable_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { + if src_face + .as_ref() + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let key_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -239,7 +242,10 @@ fn propagate_simple_queryable( for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); let current = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) && (current.is_none() || current.unwrap().1 != info) && face_hat!(dst_face) .remote_interests @@ -249,11 +255,14 @@ fn propagate_simple_queryable( dst_face.whatami == WhatAmI::Client } else { dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables) - .failover_brokering(src_face.as_ref().unwrap().zid, dst_face.zid)) + && src_face + .as_ref() + .map(|src_face| { + src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid) + }) + .unwrap_or(true) } { let id = current @@ -486,7 +495,10 @@ fn send_forget_sourced_queryable_to_net_childs( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) 
=> { - if src_face.is_none() || someface.id != src_face.unwrap().id { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { let wire_expr = Resource::decl_key(res, &mut someface); someface.primitives.send_declare(RoutingContext::with_expr( @@ -953,10 +965,16 @@ pub(super) fn queries_tree_change( new_childs: &[Vec], net_type: WhatAmI, ) { + let net = match hat!(tables).get_net(net_type) { + Some(net) => net, + None => { + tracing::error!("Error accessing net in queries_tree_change!"); + return; + } + }; // propagate qabls to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; @@ -1376,48 +1394,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - 
result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs new file mode 100644 index 0000000000..583a4dc336 --- /dev/null +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -0,0 +1,1051 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use petgraph::graph::NodeIndex; +use zenoh_protocol::{ + core::{WhatAmI, ZenohIdProto}, + network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareToken, UndeclareToken, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat, face_hat_mut, get_peer, get_router, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn send_sourced_token_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: NodeId, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face + .map(|src_face| someface.id != src_face.id) + 
.unwrap_or(true) + { + let key_expr = Resource::decl_key(res, &mut someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareToken(DeclareToken { + id: 0, // Sourced tokens do not use ids + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + } + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +#[inline] +fn propagate_simple_token_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &mut Arc, + full_peer_net: bool, +) { + if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) + } + { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_tokens.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + 
} + } +} + +fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { + let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face, full_peer_net); + } +} + +fn propagate_sourced_token( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohIdProto, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_token_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + tree_sid.index() as NodeId, + ); + } else { + tracing::trace!( + "Propagating liveliness {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => tracing::error!( + "Error propagating token {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + router: ZenohIdProto, +) { + if !res_hat!(res).router_tokens.contains(&router) { + // Register router liveliness + { + res_hat_mut!(res).router_tokens.insert(router); + hat_mut!(tables).router_tokens.insert(res.clone()); + } + + // Propagate liveliness to routers + propagate_sourced_token(tables, res, Some(face), &router, WhatAmI::Router); + } + // Propagate liveliness to peers + if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { + register_peer_token(tables, face, res, tables.zid) + } + + // Propagate liveliness to clients + propagate_simple_token(tables, res, face); +} + +fn declare_router_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + router: ZenohIdProto, +) { + register_router_token(tables, face, res, router); +} + +fn register_peer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: ZenohIdProto, 
+) { + if !res_hat!(res).peer_tokens.contains(&peer) { + // Register peer liveliness + { + res_hat_mut!(res).peer_tokens.insert(peer); + hat_mut!(tables).peer_tokens.insert(res.clone()); + } + + // Propagate liveliness to peers + propagate_sourced_token(tables, res, Some(face), &peer, WhatAmI::Peer); + } +} + +fn declare_peer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: ZenohIdProto, +) { + register_peer_token(tables, face, res, peer); + let zid = tables.zid; + register_router_token(tables, face, res, zid); +} + +fn register_client_token( + _tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + register_client_token(tables, face, id, res); + let zid = tables.zid; + register_router_token(tables, face, res, zid); +} + +#[inline] +fn remote_router_tokens(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_tokens + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn remote_peer_tokens(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_tokens + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_tokens(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id 
!= face.id && ctx.token) +} + +#[inline] +fn send_forget_sourced_token_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: Option, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let wire_expr = Resource::decl_key(res, &mut someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: 0, // Sourced tokens do not use ids + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + } + } + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } else if face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() + }) { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. 
+ face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + )); + } + for res in face_hat!(&mut face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_tokens(&m, &face) + || remote_peer_tokens(tables, &m) + || remote_router_tokens(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } else if face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() + && r.as_ref().map(|r| r.matches(&res)).unwrap_or(true) + && !o.aggregate() + }) { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. 
+ face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(&res, "", face.id), + }, + }), + }, + res.expr(), + )); + } + } + } + } +} + +fn propagate_forget_simple_token_to_peers(tables: &mut Tables, res: &Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_tokens.len() == 1 + && res_hat!(res).router_tokens.contains(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_tokens.contains_key(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.token + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } + } +} + +fn propagate_forget_sourced_token( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohIdProto, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_token_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + Some(tree_sid.index() as NodeId), + ); + } else { + tracing::trace!( + "Propagating forget token {}: tree for node {} 
sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => tracing::error!( + "Error propagating forget token {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_token(tables: &mut Tables, res: &mut Arc, router: &ZenohIdProto) { + res_hat_mut!(res) + .router_tokens + .retain(|token| token != router); + + if res_hat!(res).router_tokens.is_empty() { + hat_mut!(tables) + .router_tokens + .retain(|token| !Arc::ptr_eq(token, res)); + + if hat_mut!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_token(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_token(tables, res); + } + + propagate_forget_simple_token_to_peers(tables, res); +} + +fn undeclare_router_token( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohIdProto, +) { + if res_hat!(res).router_tokens.contains(router) { + unregister_router_token(tables, res, router); + propagate_forget_sourced_token(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + router: &ZenohIdProto, +) { + undeclare_router_token(tables, Some(face), res, router); +} + +fn unregister_peer_token(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { + res_hat_mut!(res).peer_tokens.retain(|token| token != peer); + + if res_hat!(res).peer_tokens.is_empty() { + hat_mut!(tables) + .peer_tokens + .retain(|token| !Arc::ptr_eq(token, res)); + } +} + +fn undeclare_peer_token( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohIdProto, +) { + if res_hat!(res).peer_tokens.contains(peer) { + unregister_peer_token(tables, res, peer); + propagate_forget_sourced_token(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: &ZenohIdProto, +) { + undeclare_peer_token(tables, Some(face), res, peer); + let client_tokens = 
res.session_ctxs.values().any(|ctx| ctx.token); + let peer_tokens = remote_peer_tokens(tables, res); + let zid = tables.zid; + if !client_tokens && !peer_tokens { + undeclare_router_token(tables, None, res, &zid); + } +} + +pub(super) fn undeclare_client_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + if !face_hat_mut!(face) + .remote_tokens + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).token = false; + } + + let mut client_tokens = client_tokens(res); + let router_tokens = remote_router_tokens(tables, res); + let peer_tokens = remote_peer_tokens(tables, res); + if client_tokens.is_empty() && !peer_tokens { + undeclare_router_token(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_token_to_peers(tables, res); + } + + if client_tokens.len() == 1 && !router_tokens && !peer_tokens { + let mut face = &mut client_tokens[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_tokens(&m, face) + || remote_peer_tokens(tables, &m) + || remote_router_tokens(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: 
DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } + } + } + } +} + +fn forget_client_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { + undeclare_client_token(tables, face, &mut res); + Some(res) + } else { + None + } +} + +pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + for mut res in hat!(tables) + .router_tokens + .iter() + .filter(|res| res_hat!(res).router_tokens.contains(node)) + .cloned() + .collect::>>() + { + unregister_router_token(tables, &mut res, node); + Resource::clean(&mut res) + } + } + WhatAmI::Peer => { + for mut res in hat!(tables) + .peer_tokens + .iter() + .filter(|res| res_hat!(res).peer_tokens.contains(node)) + .cloned() + .collect::>>() + { + unregister_peer_token(tables, &mut res, node); + let client_tokens = res.session_ctxs.values().any(|ctx| ctx.token); + let peer_tokens = remote_peer_tokens(tables, &res); + if !client_tokens && !peer_tokens { + undeclare_router_token(tables, None, &mut res, &tables.zid.clone()); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn token_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + net_type: WhatAmI, +) { + let net = match hat!(tables).get_net(net_type) { + Some(net) => net, + None => { + tracing::error!("Error accessing net in token_tree_change!"); + return; + } + }; + // propagate tokens to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let tokens_res = match net_type { + WhatAmI::Router => &hat!(tables).router_tokens, + _ => &hat!(tables).peer_tokens, + }; + + for res in tokens_res { + let tokens = match 
net_type { + WhatAmI::Router => &res_hat!(res).router_tokens, + _ => &res_hat!(res).peer_tokens, + }; + for token in tokens { + if *token == tree_id { + send_sourced_token_to_net_childs( + tables, + net, + tree_childs, + res, + None, + tree_sid as NodeId, + ); + } + } + } + } + } + } +} + +pub(super) fn token_linkstate_change( + tables: &mut Tables, + zid: &ZenohIdProto, + links: &[ZenohIdProto], +) { + if let Some(src_face) = tables.get_face(zid).cloned() { + if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { + for res in face_hat!(src_face).remote_tokens.values() { + let client_tokens = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.token); + if !remote_router_tokens(tables, res) && !client_tokens { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if let Some(id) = face_hat!(dst_face).local_tokens.get(res).cloned() { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.token + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + + face_hat_mut!(dst_face).local_tokens.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + let id = 
face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + } + } + } + } + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_tokens.iter().any(|token| { + token.context.is_some() + && token.matches(res) + && (remote_client_tokens(token, face) + || remote_peer_tokens(tables, token) + || remote_router_tokens(tables, token)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + )); + } + } else { + for token in &hat!(tables).router_tokens { + if token.context.is_some() + && token.matches(res) + && (res_hat!(token) + .router_tokens + .iter() + .any(|r| *r != tables.zid) + || res_hat!(token).peer_tokens.iter().any(|r| *r != tables.zid) + || token.session_ctxs.values().any(|s| { + s.face.id != face.id + && s.token + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || 
(s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } else { + for token in &hat!(tables).router_tokens { + if token.context.is_some() + && (res_hat!(token) + .router_tokens + .iter() + .any(|r| *r != tables.zid) + || res_hat!(token).peer_tokens.iter().any(|r| *r != tables.zid) + || token.session_ctxs.values().any(|s| { + s.token + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(s.face.zid, face.zid)) + })) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + )); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + node_id: NodeId, + _interest_id: Option, + ) { + match face.whatami { + WhatAmI::Router => { + if let Some(router) = get_router(tables, face, node_id) { + declare_router_token(tables, face, res, router) + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if 
let Some(peer) = get_peer(tables, face, node_id) { + declare_peer_token(tables, face, res, peer) + } + } else { + declare_client_token(tables, face, id, res) + } + } + _ => declare_client_token(tables, face, id, res), + } + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + node_id: NodeId, + ) -> Option> { + match face.whatami { + WhatAmI::Router => { + if let Some(mut res) = res { + if let Some(router) = get_router(tables, face, node_id) { + forget_router_token(tables, face, &mut res, &router); + Some(res) + } else { + None + } + } else { + None + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_token(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None + } + } else { + forget_client_token(tables, face, id) + } + } + _ => forget_client_token(tables, face, id), + } + } +} diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 9601465326..f4a5e4c2da 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -32,8 +32,6 @@ use zenoh_protocol::{ use self::{dispatcher::face::Face, router::Resource}; use super::runtime; -pub(crate) static PREFIX_LIVELINESS: &str = "@/liveliness"; - pub(crate) struct RoutingContext { pub(crate) msg: Msg, pub(crate) inface: OnceCell, diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index d11ab24b5b..f13d1a7b95 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -23,6 +23,7 @@ use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast, TransportPeer}; +pub(crate) use super::dispatcher::token::*; pub use super::dispatcher::{pubsub::*, queries::*, resource::*}; use super::{ dispatcher::{ @@ -60,7 +61,6 @@ impl Router { }) } - 
#[allow(clippy::too_many_arguments)] pub fn init_link_state(&mut self, runtime: Runtime) { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 00d181311e..dbd850da24 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -11,63 +11,317 @@ // Contributors: // ZettaScale Zenoh Team, // -#![cfg(feature = "unstable")] -use std::time::Duration; - -use zenoh::{ - config, - prelude::*, - sample::{Sample, SampleKind}, -}; +#[cfg(feature = "unstable")] use zenoh_core::ztimeout; +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_liveliness() { +async fn test_liveliness_subscriber_clique() { + use std::time::Duration; + + use zenoh::{config, prelude::*, sample::SampleKind}; + use zenoh_config::WhatAmI; + use zenoh_link::EndPoint; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); + const PEER1_ENDPOINT: &str = "tcp/localhost:47447"; + const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/clique"; + + zenoh_util::try_init_log_from_env(); + + let peer1 = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; - let mut c1 = config::peer(); - c1.listen - .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) - .unwrap(); - c1.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session1 = ztimeout!(zenoh::open(c1)).unwrap(); - let mut c2 = config::peer(); - c2.connect - .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) - .unwrap(); - c2.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session2 = ztimeout!(zenoh::open(c2)).unwrap(); - - let sub = 
ztimeout!(session2 - .liveliness() - .declare_subscriber("zenoh_liveliness_test")) - .unwrap(); - - let token = ztimeout!(session1.liveliness().declare_token("zenoh_liveliness_test")).unwrap(); + let peer2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (2) ZID: {}", s.zid()); + s + }; + let sub = ztimeout!(peer1.liveliness().declare_subscriber(LIVELINESS_KEYEXPR)).unwrap(); tokio::time::sleep(SLEEP).await; - let replies = ztimeout!(session2.liveliness().get("zenoh_liveliness_test")).unwrap(); - let sample: Sample = ztimeout!(replies.recv_async()) - .unwrap() - .into_result() - .unwrap(); + let token = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); assert!(sample.kind() == SampleKind::Put); - assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); - assert!(ztimeout!(replies.recv_async()).is_err()); + drop(token); + tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); + assert!(sample.kind() == SampleKind::Delete); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); +} + +#[cfg(feature = "unstable")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_query_clique() { + use std::time::Duration; + + use zenoh::{config, prelude::*, sample::SampleKind}; + use zenoh_config::WhatAmI; + use zenoh_link::EndPoint; + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const PEER1_ENDPOINT: &str = "tcp/localhost:47448"; + const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/clique"; + + zenoh_util::try_init_log_from_env(); + + 
let peer1 = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; + + let peer2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (2) ZID: {}", s.zid()); + s + }; + + let _token = ztimeout!(peer1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let get = ztimeout!(peer2.liveliness().get(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap(); assert!(sample.kind() == SampleKind::Put); - assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); +} + +#[cfg(feature = "unstable")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_subscriber_brokered() { + use std::time::Duration; + + use zenoh::{config, prelude::*, sample::SampleKind}; + use zenoh_config::WhatAmI; + use zenoh_link::EndPoint; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const ROUTER_ENDPOINT: &str = "tcp/localhost:47449"; + const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/brokered"; + + zenoh_util::try_init_log_from_env(); + + let _router = { + let mut c = config::default(); + c.listen + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Router)); + let s = 
ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Router ZID: {}", s.zid()); + s + }; + + let client1 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (1) ZID: {}", s.zid()); + s + }; + + let client2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (2) ZID: {}", s.zid()); + s + }; + + let sub = ztimeout!(client1.liveliness().declare_subscriber(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let token = ztimeout!(client2.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); drop(token); + tokio::time::sleep(SLEEP).await; + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert!(sample.kind() == SampleKind::Delete); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); +} + +#[cfg(feature = "unstable")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_query_brokered() { + use std::time::Duration; + + use zenoh::{config, prelude::*, sample::SampleKind}; + use zenoh_config::WhatAmI; + use zenoh_link::EndPoint; + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const ROUTER_ENDPOINT: &str = "tcp/localhost:47450"; + const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/brokered"; + + zenoh_util::try_init_log_from_env(); + + let _router = { + let mut c = 
config::default(); + c.listen + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Router)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Router ZID: {}", s.zid()); + s + }; + + let client1 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (1) ZID: {}", s.zid()); + s + }; + + let client2 = { + let mut c = config::default(); + c.connect + .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (2) ZID: {}", s.zid()); + s + }; + + let _token = ztimeout!(client1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let get = ztimeout!(client2.liveliness().get(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap(); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); +} + +#[cfg(feature = "unstable")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_subscriber_local() { + use std::time::Duration; + + use zenoh::{config, prelude::*, sample::SampleKind}; + use zenoh_config::WhatAmI; + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/local"; + + zenoh_util::try_init_log_from_env(); + + let peer = { + let mut c = config::default(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); 
+ let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; + + let sub = ztimeout!(peer.liveliness().declare_subscriber(LIVELINESS_KEYEXPR)).unwrap(); tokio::time::sleep(SLEEP).await; - let replies = ztimeout!(session2.liveliness().get("zenoh_liveliness_test")).unwrap(); - assert!(ztimeout!(replies.recv_async()).is_err()); + let token = ztimeout!(peer.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + drop(token); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert!(sample.kind() == SampleKind::Delete); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); +} + +#[cfg(feature = "unstable")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_query_local() { + use std::time::Duration; + + use zenoh::{config, prelude::*, sample::SampleKind}; + use zenoh_config::WhatAmI; + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/local"; + + zenoh_util::try_init_log_from_env(); + + let peer = { + let mut c = config::default(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; - assert!(replies.try_recv().is_err()); + let _token = ztimeout!(peer.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let get = ztimeout!(peer.liveliness().get(LIVELINESS_KEYEXPR)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap(); + 
assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); } From 2392bfe6cec3a7851b0c4d5f81b686d142c4dba4 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 18 Jun 2024 09:56:33 +0200 Subject: [PATCH 474/598] Remove unwraps --- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 6a8ebbc8e6..74b676ca80 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -98,9 +98,12 @@ fn propagate_simple_queryable_to( .remote_interests .values() .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true))) - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client) + && src_face + .as_ref() + .map(|src_face| { + src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client + }) + .unwrap_or(true) { let id = current .map(|c| c.0) From 811a5f6b95a3c5471cb8e9cd89dcd6b004f89947 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 18 Jun 2024 10:13:39 +0200 Subject: [PATCH 475/598] Add serialize/deserialize to ZBytesWriter/ZBytesReader --- zenoh/src/api/bytes.rs | 121 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index ef94d83116..aac17ea508 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -18,6 +18,7 @@ use std::{ string::FromUtf8Error, sync::Arc, }; +use uhlc::Timestamp; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, @@ -183,6 +184,22 @@ impl ZBytes { #[derive(Debug)] pub struct ZBytesReader<'a>(ZBufReader<'a>); +impl ZBytesReader<'_> { + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
+ pub fn deserialize(&mut self) -> ZResult + where + T: TryFrom, + >::Error: Debug, + { + let codec = Zenoh080::new(); + let abuf: ZBuf = codec.read(&mut self.0).map_err(|e| zerror!("{:?}", e))?; + let apld = ZBytes::new(abuf); + + let a = T::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + Ok(a) + } +} + impl std::io::Read for ZBytesReader<'_> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { std::io::Read::read(&mut self.0, buf) @@ -200,6 +217,22 @@ impl std::io::Seek for ZBytesReader<'_> { #[derive(Debug)] pub struct ZBytesWriter<'a>(ZBufWriter<'a>); +impl ZBytesWriter<'_> { + pub fn serialize(&mut self, t: T) -> ZResult<()> + where + T: TryInto, + >::Error: Debug, + { + let tpld: ZBytes = t.try_into().map_err(|e| zerror!("{:?}", e))?; + let codec = Zenoh080::new(); + codec + .write(&mut self.0, &tpld.0) + .map_err(|e| zerror!("{:?}", e))?; + + Ok(()) + } +} + impl std::io::Write for ZBytesWriter<'_> { fn write(&mut self, buf: &[u8]) -> std::io::Result { std::io::Write::write(&mut self.0, buf) @@ -1225,6 +1258,94 @@ impl<'s> TryFrom<&'s mut ZBytes> for Parameters<'s> { } } +// Timestamp +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Timestamp) -> Self::Output { + ZSerde.serialize(&s) + } +} + +impl From for ZBytes { + fn from(t: Timestamp) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Timestamp> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Timestamp) -> Self::Output { + let codec = Zenoh080::new(); + let mut buffer = ZBuf::empty(); + let mut writer = buffer.writer(); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. 
+ unsafe { + codec.write(&mut writer, s).unwrap_unchecked(); + } + ZBytes::from(buffer) + } +} + +impl From<&Timestamp> for ZBytes { + fn from(t: &Timestamp) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Timestamp> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Timestamp) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Timestamp> for ZBytes { + fn from(t: &mut Timestamp) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Timestamp> for ZSerde { + type Input = &'a ZBytes; + type Error = zenoh_buffers::reader::DidntRead; + + fn deserialize(self, v: Self::Input) -> Result { + let codec = Zenoh080::new(); + let mut reader = v.0.reader(); + let e: Timestamp = codec.read(&mut reader)?; + Ok(e) + } +} + +impl TryFrom for Timestamp { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for Timestamp { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for Timestamp { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Encoding impl Serialize for ZSerde { type Output = ZBytes; From 5080d18a3211808f8721b050f80d657b7dfc1da0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 18 Jun 2024 10:32:05 +0200 Subject: [PATCH 476/598] Add tests --- zenoh/src/api/bytes.rs | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index aac17ea508..27cfdc3e3f 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -2233,6 +2233,39 @@ mod tests { } basic(); + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn reader_writer() { + let mut bytes = ZBytes::empty(); + let mut writer = 
bytes.writer(); + + let i1 = 1_u8; + let i2 = String::from("abcdef"); + let i3 = vec![2u8; 64]; + + println!("Write: {:?}", i1); + writer.serialize(i1).unwrap(); + println!("Write: {:?}", i2); + writer.serialize(&i2).unwrap(); + println!("Write: {:?}", i3); + writer.serialize(&i3).unwrap(); + + let mut reader = bytes.reader(); + let o1: u8 = reader.deserialize().unwrap(); + println!("Read: {:?}", o1); + let o2: String = reader.deserialize().unwrap(); + println!("Read: {:?}", o2); + let o3: Vec = reader.deserialize().unwrap(); + println!("Read: {:?}", o3); + + println!(); + + assert_eq!(i1, o1); + assert_eq!(i2, o2); + assert_eq!(i3, o3); + } + reader_writer(); + // SHM #[cfg(feature = "shared-memory")] { From eac2e5a05c66ebb5c45bdf3c303792514dbf094d Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 18 Jun 2024 12:09:17 +0200 Subject: [PATCH 477/598] Remove subscriber and query scopes (#1166) --- zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/query.rs | 7 --- zenoh/src/api/session.rs | 111 ++---------------------------------- zenoh/src/api/subscriber.rs | 2 - 4 files changed, 7 insertions(+), 115 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 6e8cc30483..11cfc78918 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -545,7 +545,7 @@ where let session = self.session; let (callback, handler) = self.handler.into_handler(); session - .declare_liveliness_subscriber_inner(&key_expr, None, Locality::default(), callback) + .declare_liveliness_subscriber_inner(&key_expr, Locality::default(), callback) .map(|sub_state| Subscriber { subscriber: SubscriberInner { session, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e46d0a75ba..408be5514b 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -158,7 +158,6 @@ pub(crate) struct QueryState { pub(crate) nb_final: usize, pub(crate) key_expr: KeyExpr<'static>, pub(crate) parameters: Parameters<'static>, - pub(crate) scope: 
Option>, pub(crate) reception_mode: ConsolidationMode, pub(crate) replies: Option>, pub(crate) callback: Callback<'static, Reply>, @@ -195,7 +194,6 @@ impl QueryState { pub struct SessionGetBuilder<'a, 'b, Handler> { pub(crate) session: &'a Session, pub(crate) selector: ZResult>, - pub(crate) scope: ZResult>>, pub(crate) target: QueryTarget, pub(crate) consolidation: QueryConsolidation, pub(crate) qos: QoSBuilder, @@ -280,7 +278,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { let SessionGetBuilder { session, selector, - scope, target, consolidation, qos, @@ -295,7 +292,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { SessionGetBuilder { session, selector, - scope, target, consolidation, qos, @@ -367,7 +363,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { let SessionGetBuilder { session, selector, - scope, target, consolidation, qos, @@ -382,7 +377,6 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { SessionGetBuilder { session, selector, - scope, target, consolidation, qos, @@ -496,7 +490,6 @@ where .query( &key_expr, ¶meters, - &self.scope?, self.target, self.consolidation, self.qos.into(), diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 14e0899a55..e874cd2393 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,7 +13,7 @@ // use std::{ collections::HashMap, - convert::{TryFrom, TryInto}, + convert::TryInto, fmt, future::{IntoFuture, Ready}, ops::Deref, @@ -878,7 +878,6 @@ impl Session { SessionGetBuilder { session: self, selector, - scope: Ok(None), target: QueryTarget::DEFAULT, consolidation: QueryConsolidation::DEFAULT, qos: qos.into(), @@ -1080,7 +1079,6 @@ impl Session { pub(crate) fn declare_subscriber_inner( &self, key_expr: &KeyExpr, - scope: Option<&KeyExpr>, origin: Locality, callback: Callback<'static, Sample>, info: &SubscriberInfo, @@ -1088,16 +1086,11 @@ impl Session { let mut state = zwrite!(self.state); tracing::trace!("declare_subscriber({:?})", 
key_expr); let id = self.runtime.next_id(); - let key_expr = match scope { - Some(scope) => scope / key_expr, - None => key_expr.clone(), - }; let mut sub_state = SubscriberState { id, remote_id: id, key_expr: key_expr.clone().into_owned(), - scope: scope.map(|e| e.clone().into_owned()), origin, callback, }; @@ -1109,7 +1102,7 @@ impl Session { match state .aggregated_subscribers .iter() - .find(|s| s.includes(&key_expr)) + .find(|s| s.includes(key_expr)) { Some(join_sub) => { if let Some(joined_sub) = state @@ -1129,7 +1122,7 @@ impl Session { if let Some(twin_sub) = state .subscribers(SubscriberKind::Subscriber) .values() - .find(|s| s.origin != Locality::SessionLocal && s.key_expr == key_expr) + .find(|s| s.origin != Locality::SessionLocal && s.key_expr == *key_expr) { sub_state.remote_id = twin_sub.remote_id; None @@ -1390,23 +1383,17 @@ impl Session { pub(crate) fn declare_liveliness_subscriber_inner( &self, key_expr: &KeyExpr, - scope: Option<&KeyExpr>, origin: Locality, callback: Callback<'static, Sample>, ) -> ZResult> { let mut state = zwrite!(self.state); trace!("declare_liveliness_subscriber({:?})", key_expr); let id = self.runtime.next_id(); - let key_expr = match scope { - Some(scope) => scope / key_expr, - None => key_expr.clone(), - }; let sub_state = SubscriberState { id, remote_id: id, key_expr: key_expr.clone().into_owned(), - scope: scope.map(|e| e.clone().into_owned()), origin, callback, }; @@ -1659,34 +1646,7 @@ impl Session { if sub.origin == Locality::Any || (local == (sub.origin == Locality::SessionLocal)) { - match &sub.scope { - Some(scope) => { - if !res.key_expr.starts_with(&***scope) { - tracing::warn!( - "Received Data for `{}`, which didn't start with scope `{}`: don't deliver to scoped Subscriber.", - res.key_expr, - scope, - ); - } else { - match KeyExpr::try_from(&res.key_expr[(scope.len() + 1)..]) - { - Ok(key_expr) => callbacks.push(( - sub.callback.clone(), - key_expr.into_owned(), - )), - Err(e) => { - tracing::warn!( - "Error 
unscoping received Data for `{}`: {}", - res.key_expr, - e, - ); - } - } - } - } - None => callbacks - .push((sub.callback.clone(), res.key_expr.clone().into())), - }; + callbacks.push((sub.callback.clone(), res.key_expr.clone().into())); } } } @@ -1710,33 +1670,7 @@ impl Session { || (local == (sub.origin == Locality::SessionLocal))) && key_expr.intersects(&sub.key_expr) { - match &sub.scope { - Some(scope) => { - if !key_expr.starts_with(&***scope) { - tracing::warn!( - "Received Data for `{}`, which didn't start with scope `{}`: don't deliver to scoped Subscriber.", - key_expr, - scope, - ); - } else { - match KeyExpr::try_from(&key_expr[(scope.len() + 1)..]) { - Ok(key_expr) => callbacks.push(( - sub.callback.clone(), - key_expr.into_owned(), - )), - Err(e) => { - tracing::warn!( - "Error unscoping received Data for `{}`: {}", - key_expr, - e, - ); - } - } - } - } - None => callbacks - .push((sub.callback.clone(), key_expr.clone().into_owned())), - }; + callbacks.push((sub.callback.clone(), key_expr.clone().into_owned())); } } } @@ -1765,7 +1699,6 @@ impl Session { &self, key_expr: &KeyExpr<'_>, parameters: &Parameters<'_>, - scope: &Option>, target: QueryTarget, consolidation: QueryConsolidation, qos: QoS, @@ -1823,20 +1756,14 @@ impl Session { } }); - let key_expr = match scope { - Some(scope) => scope / key_expr, - None => key_expr.clone().into_owned(), - }; - tracing::trace!("Register query {} (nb_final = {})", qid, nb_final); let wexpr = key_expr.to_wire(self).to_owned(); state.queries.insert( qid, QueryState { nb_final, - key_expr, + key_expr: key_expr.clone().into_owned(), parameters: parameters.clone().into_owned(), - scope: scope.clone().map(|e| e.into_owned()), reception_mode: consolidation, replies: (consolidation != ConsolidationMode::None).then(HashMap::new), callback, @@ -2480,32 +2407,6 @@ impl Primitives for Session { ); return; } - let key_expr = match &query.scope { - Some(scope) => { - if !key_expr.starts_with(&***scope) { - tracing::warn!( - 
"Received Reply for `{}` from `{:?}, which didn't start with scope `{}`: dropping Reply.", - key_expr, - msg.ext_respid, - scope, - ); - return; - } - match KeyExpr::try_from(&key_expr[(scope.len() + 1)..]) { - Ok(key_expr) => key_expr, - Err(e) => { - tracing::warn!( - "Error unscoping received Reply for `{}` from `{:?}: {}", - key_expr, - msg.ext_respid, - e, - ); - return; - } - } - } - None => key_expr, - }; struct Ret { payload: ZBuf, diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 493df4a54c..79b4429876 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -37,7 +37,6 @@ pub(crate) struct SubscriberState { pub(crate) id: Id, pub(crate) remote_id: Id, pub(crate) key_expr: KeyExpr<'static>, - pub(crate) scope: Option>, pub(crate) origin: Locality, pub(crate) callback: Callback<'static, Sample>, } @@ -380,7 +379,6 @@ where session .declare_subscriber_inner( &key_expr, - None, self.origin, callback, &SubscriberInfo { From 61196700762efcc93c2b2bc44550ff94fbf7645d Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Tue, 18 Jun 2024 18:11:18 +0800 Subject: [PATCH 478/598] Remove the packages in plugins since it's re-exported now. 
(#1161) Signed-off-by: ChenYing Kuo --- Cargo.lock | 8 -------- plugins/zenoh-backend-example/Cargo.toml | 3 --- plugins/zenoh-backend-example/src/lib.rs | 3 +-- plugins/zenoh-plugin-example/Cargo.toml | 2 -- plugins/zenoh-plugin-example/src/lib.rs | 5 +++-- plugins/zenoh-plugin-rest/Cargo.toml | 2 -- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 8 +++++--- plugins/zenoh-plugin-storage-manager/Cargo.toml | 1 - plugins/zenoh-plugin-storage-manager/src/lib.rs | 6 +++--- 10 files changed, 13 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6f286aec3..81dc081b98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5137,10 +5137,7 @@ dependencies = [ "serde_json", "tracing", "zenoh", - "zenoh-core", "zenoh-plugin-trait", - "zenoh-result", - "zenoh-util", "zenoh_backend_traits", ] @@ -5551,9 +5548,7 @@ dependencies = [ "serde_json", "tracing", "zenoh", - "zenoh-core", "zenoh-plugin-trait", - "zenoh-result", "zenoh-util", ] @@ -5580,8 +5575,6 @@ dependencies = [ "tracing", "zenoh", "zenoh-plugin-trait", - "zenoh-result", - "zenoh-util", ] [[package]] @@ -5607,7 +5600,6 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", - "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-backend-example/Cargo.toml b/plugins/zenoh-backend-example/Cargo.toml index e77ce51294..df505bd211 100644 --- a/plugins/zenoh-backend-example/Cargo.toml +++ b/plugins/zenoh-backend-example/Cargo.toml @@ -36,9 +36,6 @@ git-version = { workspace = true } tracing = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = ["default"] } -zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } -zenoh-util = { workspace = true } async-trait = { workspace = true } zenoh_backend_traits = { workspace = true } diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 
89fcd5afd5..bd64fd5024 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -15,14 +15,13 @@ use std::collections::{hash_map::Entry, HashMap}; use async_std::sync::RwLock; use async_trait::async_trait; -use zenoh::{internal::Value, key_expr::OwnedKeyExpr, time::Timestamp}; +use zenoh::{internal::Value, key_expr::OwnedKeyExpr, prelude::*, time::Timestamp}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, VolumeInstance, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; -use zenoh_result::ZResult; #[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(ExampleBackend); diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index 024c2fb6ef..bc52ee5fb2 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -47,6 +47,4 @@ zenoh = { workspace = true, features = [ "internal", "unstable", ] } -zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 4c55b415af..cbd84fb766 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -27,16 +27,17 @@ use futures::select; use tracing::{debug, info}; use zenoh::{ internal::{ + bail, plugins::{RunningPluginTrait, ZenohPlugin}, runtime::Runtime, + zlock, }, key_expr::{keyexpr, KeyExpr}, + prelude::ZResult, sample::Sample, session::SessionDeclarations, }; -use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; -use zenoh_result::{bail, ZResult}; // The struct implementing the ZenohPlugin and ZenohPlugin traits pub struct ExamplePlugin {} diff --git a/plugins/zenoh-plugin-rest/Cargo.toml 
b/plugins/zenoh-plugin-rest/Cargo.toml index db1a0f747e..5f36b5bf34 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -53,8 +53,6 @@ zenoh = { workspace = true, features = [ "unstable", ] } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } -zenoh-util = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 3823554bea..e39d7c28b2 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -35,7 +35,7 @@ if(typeof(EventSource) !== "undefined") { #[async_std::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 86742d67eb..107f241a87 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,17 +29,19 @@ use zenoh::{ bytes::ZBytes, encoding::Encoding, internal::{ + bail, plugins::{RunningPluginTrait, ZenohPlugin}, runtime::Runtime, + zerror, }, key_expr::{keyexpr, KeyExpr}, + prelude::*, query::{QueryConsolidation, Reply}, sample::{EncodingBuilderTrait, Sample, SampleKind}, selector::{Parameters, Selector, ZenohParameters}, session::{Session, SessionDeclarations}, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; -use zenoh_result::{bail, zerror, ZResult}; mod config; pub use config::Config; @@ -226,7 +228,7 @@ impl Plugin for RestPlugin { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); @@ -467,7 +469,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let zid = runtime.zid().to_string(); let session = zenoh::session::init(runtime).await.unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 8fc530125e..a6694108db 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -52,7 +52,6 @@ zenoh = { workspace = true, features = [ "unstable", ] } zenoh-plugin-trait = { workspace = true } -zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } [build-dependencies] diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 9bff56e791..69557af614 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -37,6 +37,7 @@ use zenoh::{ zlock, LibLoader, }, key_expr::{keyexpr, KeyExpr}, + prelude::Wait, session::Session, }; use zenoh_backend_traits::{ @@ -49,7 +50,6 @@ use zenoh_plugin_trait::{ mod backends_mgt; use backends_mgt::*; -use zenoh::prelude::Wait; mod memory_backend; mod replica; @@ -69,7 +69,7 @@ impl Plugin for StoragesPlugin { type Instance = RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; @@ -102,7 +102,7 @@ impl 
StorageRuntimeInner { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let PluginConfig { name, backend_search_dirs, From 53276bcfc1212e657f3fce156fd800ac5c02d67d Mon Sep 17 00:00:00 2001 From: oteffahi <70609372+oteffahi@users.noreply.github.com> Date: Tue, 18 Jun 2024 12:15:25 +0200 Subject: [PATCH 479/598] Add certificate and user/password authentication methods to ACL filters (#1073) * adding test files * testing cert names * testing cert authn * adding basic authID functionality * remove secret files * add extensibility * add extensibility * add extensibility * adding type constraints * adding level abstraction for authentication info * adding username authentication * cleaning code * added cfg checks for auth_usrpwd * adding test files * fix error due to vsock * fix test error * access auth ids in acl interceptor * add authentication support in acl * added Subject * adding test files * add authn features with acl * remove error * add tests for tls and quic * add tests for user-password * remove format error * ignore tests without testfiles * remove shm test errors * remove typos * add testfiles for authn * fix testfiles for authn * Chore: Code format * Change port numbers to allow tests to run concurrently * Fix TLS and Quic test failures due to subsequent sessions on same port number * Format json configs * Remove unused deprecated dependency async-rustls * Chore: format list of cargo dependencies * Fix imports * Fix some styling and format * Fix feature usage * Remove unnecessary redefinition of RecvOpenSynOut * Remove unnecessary clones * Rewrite return value * Fix typo * Implement get_auth_ids for TransportUnicastLowlatency * Fix disabled access control for certain tests * Add lowlatency test * Remove unnecessary warnings * Check only if interfaces list is empty Other subject lists (usernames and 
cert_common_names) can be empty in the current config schema. * Merge branch 'dev/1.0.0' into authn/testing (squashed) * Move x509-parser dependency to workspace * Revert "Check only if interfaces list is empty" Misinterpreted the code logic when making this change. This reverts commit f4cc8181a70c942d887efca75e06089b819fda03. * Change LinkAuthIdBuilder to return Self instead of &mut Self * Add LinkAuthId::builder() * chore: Reorder tokio features * Add LinkAuthId::none() * Change LinkUnicastTrait.get_auth_identifier to return ref * Change get_cert_common_name(conn) parameter to ref * Add license header * Rename get_auth_identifier to get_auth_id * Rewrite unnecessary match blocs, add evaluation of get_interface_names_by_addr error * Use std::env::temp_dir() instead of hardcoded path in zenoh/authentication test * Change return type with auth_usrpwd feature * Lint test * Fix link-vsock implementation of get_auth_id * Fix authtests filepaths on Ubuntu * Update default config * Optimize collecting AuthIds from Links --------- Co-authored-by: snehilzs Co-authored-by: snehilzs --- Cargo.lock | 114 ++ Cargo.toml | 2 + DEFAULT_CONFIG.json5 | 9 + commons/zenoh-config/src/lib.rs | 4 + io/zenoh-link-commons/src/lib.rs | 3 + io/zenoh-link-commons/src/unicast.rs | 75 + io/zenoh-links/zenoh-link-quic/Cargo.toml | 5 +- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 54 +- .../zenoh-link-serial/src/unicast.rs | 9 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 7 +- io/zenoh-links/zenoh-link-tls/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tls/src/unicast.rs | 91 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 9 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 9 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 7 +- .../zenoh-link-vsock/src/unicast.rs | 7 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 7 +- .../src/unicast/authentication.rs | 54 + .../src/unicast/establishment/accept.rs | 16 +- .../src/unicast/establishment/ext/auth/mod.rs | 25 +- 
.../unicast/establishment/ext/auth/usrpwd.rs | 8 +- .../src/unicast/establishment/open.rs | 4 + .../src/unicast/lowlatency/transport.rs | 16 + io/zenoh-transport/src/unicast/mod.rs | 16 +- .../src/unicast/transport_unicast_inner.rs | 1 + .../src/unicast/universal/transport.rs | 14 + zenoh/Cargo.toml | 1 + .../net/routing/interceptor/access_control.rs | 87 +- .../net/routing/interceptor/authorization.rs | 113 +- zenoh/tests/acl.rs | 6 +- zenoh/tests/authentication.rs | 1369 +++++++++++++++++ 31 files changed, 2051 insertions(+), 92 deletions(-) create mode 100644 io/zenoh-transport/src/unicast/authentication.rs create mode 100644 zenoh/tests/authentication.rs diff --git a/Cargo.lock b/Cargo.lock index 81dc081b98..74b0d39344 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -235,6 +235,45 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "asn1-rs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.28", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -1004,6 +1043,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = 
"der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.3.8" @@ -1082,6 +1135,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "dyn-clone" version = "1.0.13" @@ -2305,6 +2369,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.19.0" @@ -3149,6 +3222,15 @@ dependencies = [ "semver 1.0.18", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.37.25" @@ -3951,6 +4033,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = 
"system-configuration" version = "0.5.1" @@ -4073,6 +4166,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" dependencies = [ "deranged", + "itoa", "serde", "time-core", "time-macros 0.2.14", @@ -5048,6 +5142,23 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time 0.3.28", +] + [[package]] name = "yasna" version = "0.5.2" @@ -5084,6 +5195,7 @@ dependencies = [ "futures", "git-version", "lazy_static", + "once_cell", "ordered-float", "paste", "petgraph", @@ -5349,6 +5461,7 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", + "x509-parser", "zenoh-collections", "zenoh-config", "zenoh-core", @@ -5415,6 +5528,7 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", + "x509-parser", "zenoh-collections", "zenoh-config", "zenoh-core", diff --git a/Cargo.toml b/Cargo.toml index 935eacb328..37df73e66b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,6 +117,7 @@ lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } num_cpus = "1.16.0" num-traits = { version = "0.2.17", default-features = false } +once_cell = "1.19.0" ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" @@ -178,6 +179,7 @@ validated_struct = "2.1.0" vec_map = "0.8.2" webpki-roots = "0.26.0" winapi = { version = "0.3.9", features = ["iphlpapi"] } +x509-parser = "0.16.0" z-serial = "0.2.3" zenoh-ext = { version = "0.11.0-dev", path = "zenoh-ext" } zenoh-shm = { version = "0.11.0-dev", path = "commons/zenoh-shm" } diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 0dd9f1283b..6a4df381f1 100644 --- a/DEFAULT_CONFIG.json5 +++ 
b/DEFAULT_CONFIG.json5 @@ -196,8 +196,17 @@ // "key_exprs": [ // "test/demo" // ], + // /// Subjects can be interfaces // "interfaces": [ // "lo0" + // ], + // /// Subjects can be cert_common_names when using TLS or Quic + // "cert_common_names": [ + // "example.zenoh.io" + // ], + // /// Subjects can be usernames when using user/password authentication + // "usernames": [ + // "zenoh-example" // ] // }, // ] diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 6b52e250b2..25a049fb68 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -106,6 +106,8 @@ pub struct DownsamplingItemConf { #[derive(Serialize, Debug, Deserialize, Clone)] pub struct AclConfigRules { pub interfaces: Option>, + pub cert_common_names: Option>, + pub usernames: Option>, pub key_exprs: Vec, pub actions: Vec, pub flows: Option>, @@ -126,6 +128,8 @@ pub struct PolicyRule { #[serde(rename_all = "snake_case")] pub enum Subject { Interface(String), + CertCommonName(String), + Username(String), } #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 5a41050e94..56d99806a2 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -50,6 +50,7 @@ pub struct Link { pub is_reliable: bool, pub is_streamed: bool, pub interfaces: Vec, + pub auth_identifier: LinkAuthId, } #[async_trait] @@ -78,6 +79,7 @@ impl From<&LinkUnicast> for Link { is_reliable: link.is_reliable(), is_streamed: link.is_streamed(), interfaces: link.get_interface_names(), + auth_identifier: link.get_auth_id().clone(), } } } @@ -98,6 +100,7 @@ impl From<&LinkMulticast> for Link { is_reliable: link.is_reliable(), is_streamed: false, interfaces: vec![], + auth_identifier: LinkAuthId::default(), } } } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index add4c3a27b..62f39bf86c 100644 --- 
a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -20,6 +20,7 @@ use core::{ use std::net::SocketAddr; use async_trait::async_trait; +use serde::Serialize; use zenoh_protocol::{ core::{EndPoint, Locator}, transport::BatchSize, @@ -51,6 +52,7 @@ pub trait LinkUnicastTrait: Send + Sync { fn is_reliable(&self) -> bool; fn is_streamed(&self) -> bool; fn get_interface_names(&self) -> Vec; + fn get_auth_id(&self) -> &LinkAuthId; async fn write(&self, buffer: &[u8]) -> ZResult; async fn write_all(&self, buffer: &[u8]) -> ZResult<()>; async fn read(&self, buffer: &mut [u8]) -> ZResult; @@ -118,3 +120,76 @@ pub fn get_ip_interface_names(addr: &SocketAddr) -> Vec { } } } + +#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] +pub enum LinkAuthType { + Tls, + Quic, + None, +} + +#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] +pub struct LinkAuthId { + auth_type: LinkAuthType, + auth_value: Option, +} + +impl LinkAuthId { + pub const NONE: Self = Self { + auth_type: LinkAuthType::None, + auth_value: None, + }; + pub fn get_type(&self) -> &LinkAuthType { + &self.auth_type + } + pub fn get_value(&self) -> &Option { + &self.auth_value + } + pub fn builder() -> LinkAuthIdBuilder { + LinkAuthIdBuilder::new() + } +} + +impl Default for LinkAuthId { + fn default() -> Self { + LinkAuthId::NONE.clone() + } +} + +#[derive(Debug)] +pub struct LinkAuthIdBuilder { + pub auth_type: LinkAuthType, // HAS to be provided when building + pub auth_value: Option, // actual value added to the above type; is None for None type +} + +impl Default for LinkAuthIdBuilder { + fn default() -> Self { + Self::new() + } +} + +impl LinkAuthIdBuilder { + pub fn new() -> LinkAuthIdBuilder { + LinkAuthIdBuilder { + auth_type: LinkAuthType::None, + auth_value: None, + } + } + + pub fn auth_type(mut self, auth_type: LinkAuthType) -> Self { + self.auth_type = auth_type; + self + } + + pub fn auth_value(mut self, auth_value: Option) -> Self { + self.auth_value = 
auth_value; + self + } + + pub fn build(self) -> LinkAuthId { + LinkAuthId { + auth_type: self.auth_type.clone(), + auth_value: self.auth_value.clone(), + } + } +} diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 63bfc1f839..265989b293 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -30,7 +30,7 @@ base64 = { workspace = true } futures = { workspace = true } quinn = { workspace = true } rustls-native-certs = { workspace = true } -rustls-pki-types = { workspace = true } +rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } secrecy = { workspace = true } tokio = { workspace = true, features = [ @@ -54,5 +54,6 @@ zenoh-sync = { workspace = true } zenoh-util = { workspace = true } # Lock due to quinn not supporting rustls 0.22 yet rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } -tokio-rustls = "0.24.1" rustls-pemfile = { version = "1" } +tokio-rustls = "0.24.1" +x509-parser = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index a3b2687b6f..812b3ad972 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -22,10 +22,11 @@ use std::{ use async_trait::async_trait; use tokio::sync::Mutex as AsyncMutex; use tokio_util::sync::CancellationToken; +use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkAuthId, LinkAuthType, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -46,6 +47,7 @@ pub struct LinkUnicastQuic { dst_locator: Locator, send: AsyncMutex, recv: AsyncMutex, + auth_identifier: 
LinkAuthId, } impl LinkUnicastQuic { @@ -55,6 +57,7 @@ impl LinkUnicastQuic { dst_locator: Locator, send: quinn::SendStream, recv: quinn::RecvStream, + auth_identifier: LinkAuthId, ) -> LinkUnicastQuic { // Build the Quic object LinkUnicastQuic { @@ -64,6 +67,7 @@ impl LinkUnicastQuic { dst_locator, send: AsyncMutex::new(send), recv: AsyncMutex::new(recv), + auth_identifier, } } } @@ -156,6 +160,11 @@ impl LinkUnicastTrait for LinkUnicastQuic { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &self.auth_identifier + } } impl Drop for LinkUnicastQuic { @@ -255,12 +264,15 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { .await .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; + let auth_id = get_cert_common_name(&quic_conn)?; + let link = Arc::new(LinkUnicastQuic::new( quic_conn, src_addr, endpoint.into(), send, recv, + auth_id.into(), )); Ok(LinkUnicast(link)) @@ -385,7 +397,10 @@ async fn accept_task( } }; + // Get Quic auth identifier let dst_addr = quic_conn.remote_address(); + let auth_id = get_cert_common_name(&quic_conn)?; + tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastQuic::new( @@ -394,6 +409,7 @@ async fn accept_task( Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, send, recv, + auth_id.into() )); // Communicate the new link to the initial transport manager @@ -418,3 +434,37 @@ async fn accept_task( } Ok(()) } + +fn get_cert_common_name(conn: &quinn::Connection) -> ZResult { + let mut auth_id = QuicAuthId { auth_value: None }; + if let Some(pi) = conn.peer_identity() { + let serv_certs = pi.downcast::>().unwrap(); + if let Some(item) = serv_certs.iter().next() { + let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); + let subject_name = cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + 
auth_id = QuicAuthId { + auth_value: Some(subject_name.to_string()), + }; + } + } + Ok(auth_id) +} + +#[derive(Debug, Clone)] +struct QuicAuthId { + auth_value: Option, +} + +impl From for LinkAuthId { + fn from(value: QuicAuthId) -> Self { + LinkAuthId::builder() + .auth_type(LinkAuthType::Quic) + .auth_value(value.auth_value.clone()) + .build() + } +} diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index ca4efacdc6..5711e5fe5c 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -32,8 +32,8 @@ use tokio_util::sync::CancellationToken; use z_serial::ZSerial; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -212,6 +212,11 @@ impl LinkUnicastTrait for LinkUnicastSerial { fn is_streamed(&self) -> bool { false } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for LinkUnicastSerial { diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 79812c526e..1c42e805bb 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -20,7 +20,7 @@ use tokio::{ }; use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + get_ip_interface_names, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::{ @@ -164,6 +164,11 @@ impl LinkUnicastTrait for LinkUnicastTcp { fn is_streamed(&self) -> 
bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } // // WARN: This sometimes causes timeout in routing test diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 3025e3d7d7..e0f1c6b03d 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -37,6 +37,7 @@ tokio = { workspace = true, features = ["fs", "io-util", "net", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } tracing = { workspace = true } +x509-parser = { workspace = true } webpki-roots = { workspace = true } zenoh-collections = { workspace = true } zenoh-config = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 1ced1a26b1..188651d90d 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -21,10 +21,11 @@ use tokio::{ }; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; +use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkAuthId, LinkAuthType, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -37,6 +38,9 @@ use crate::{ TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, }; +#[derive(Default, Debug, PartialEq, Eq, Hash)] +pub struct TlsCommonName(String); + pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library // NOTE: TlsStream requires &mut for read and write operations. 
This means @@ -56,6 +60,7 @@ pub struct LinkUnicastTls { // Make sure there are no concurrent read or writes write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, + auth_identifier: LinkAuthId, } unsafe impl Send for LinkUnicastTls {} @@ -66,6 +71,7 @@ impl LinkUnicastTls { socket: TlsStream, src_addr: SocketAddr, dst_addr: SocketAddr, + auth_identifier: LinkAuthId, ) -> LinkUnicastTls { let (tcp_stream, _) = socket.get_ref(); // Set the TLS nodelay option @@ -99,6 +105,7 @@ impl LinkUnicastTls { dst_locator: Locator::new(TLS_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), + auth_identifier, } } @@ -189,6 +196,11 @@ impl LinkUnicastTrait for LinkUnicastTls { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &self.auth_identifier + } } impl Drop for LinkUnicastTls { @@ -282,9 +294,18 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { e ) })?; + + let (_, tls_conn) = tls_stream.get_ref(); + let auth_identifier = get_server_cert_common_name(tls_conn)?; + let tls_stream = TlsStream::Client(tls_stream); - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + auth_identifier.into(), + )); Ok(LinkUnicast(link)) } @@ -383,9 +404,18 @@ async fn accept_task( } }; + // Get TLS auth identifier + let (_, tls_conn) = tls_stream.get_ref(); + let auth_identifier = get_client_cert_common_name(tls_conn)?; + tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + auth_identifier.into(), + )); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { @@ -409,3 +439,56 @@ async fn 
accept_task( Ok(()) } + +fn get_client_cert_common_name(tls_conn: &rustls::CommonState) -> ZResult { + if let Some(serv_certs) = tls_conn.peer_certificates() { + let (_, cert) = X509Certificate::from_der(serv_certs[0].as_ref())?; + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + + Ok(TlsAuthId { + auth_value: Some(subject_name.to_string()), + }) + } else { + Ok(TlsAuthId { auth_value: None }) + } +} + +fn get_server_cert_common_name(tls_conn: &rustls::ClientConnection) -> ZResult { + let serv_certs = tls_conn.peer_certificates().unwrap(); + let mut auth_id = TlsAuthId { auth_value: None }; + + // Need the first certificate in the chain so no need for looping + if let Some(item) = serv_certs.iter().next() { + let (_, cert) = X509Certificate::from_der(item.as_ref())?; + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + + auth_id = TlsAuthId { + auth_value: Some(subject_name.to_string()), + }; + return Ok(auth_id); + } + Ok(auth_id) +} + +struct TlsAuthId { + auth_value: Option, +} + +impl From for LinkAuthId { + fn from(value: TlsAuthId) -> Self { + LinkAuthId::builder() + .auth_type(LinkAuthType::Tls) + .auth_value(value.auth_value.clone()) + .build() + } +} diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 79f980ca96..50f3af03ba 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -24,8 +24,8 @@ use tokio::{net::UdpSocket, sync::Mutex as AsyncMutex}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ - get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, - LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, + get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkAuthId, 
LinkManagerUnicastTrait, + LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -224,6 +224,11 @@ impl LinkUnicastTrait for LinkUnicastUdp { fn is_streamed(&self) -> bool { false } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for LinkUnicastUdp { diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 1b30ceb553..5e61c7903b 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -36,8 +36,8 @@ use tokio_util::sync::CancellationToken; use unix_named_pipe::{create, open_write}; use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -525,6 +525,11 @@ impl LinkUnicastTrait for UnicastPipe { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for UnicastPipe { diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index cc7147c9e0..4795838ba3 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -27,7 +27,7 @@ use tokio_util::sync::CancellationToken; use uuid::Uuid; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, 
NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -143,6 +143,11 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl Drop for LinkUnicastUnixSocketStream { diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 605f114173..6bbd627537 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -28,7 +28,7 @@ use tokio_vsock::{ }; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{endpoint::Address, EndPoint, Locator}, @@ -189,6 +189,11 @@ impl LinkUnicastTrait for LinkUnicastVsock { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for LinkUnicastVsock { diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index b671bf67f2..2b6424725a 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -34,7 +34,7 @@ use tokio_tungstenite::{accept_async, tungstenite::Message, MaybeTlsStream, WebS use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::{ core::{EndPoint, Locator}, @@ -226,6 +226,11 @@ impl LinkUnicastTrait for LinkUnicastWs { fn is_streamed(&self) -> bool { false } + + #[inline(always)] + fn get_auth_id(&self) -> 
&LinkAuthId { + &LinkAuthId::NONE + } } impl Drop for LinkUnicastWs { diff --git a/io/zenoh-transport/src/unicast/authentication.rs b/io/zenoh-transport/src/unicast/authentication.rs new file mode 100644 index 0000000000..0654085968 --- /dev/null +++ b/io/zenoh-transport/src/unicast/authentication.rs @@ -0,0 +1,54 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh_link::{LinkAuthId, LinkAuthType}; + +#[cfg(feature = "auth_usrpwd")] +use super::establishment::ext::auth::UsrPwdId; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum AuthId { + CertCommonName(String), + Username(String), + None, +} + +impl From for AuthId { + fn from(lid: LinkAuthId) -> Self { + match (lid.get_type(), lid.get_value()) { + (LinkAuthType::Tls | LinkAuthType::Quic, Some(auth_value)) => { + AuthId::CertCommonName(auth_value.clone()) + } + _ => AuthId::None, + } + } +} + +#[cfg(feature = "auth_usrpwd")] +impl From for AuthId { + fn from(user_password_id: UsrPwdId) -> Self { + match user_password_id.0 { + Some(username) => { + // Convert username from Vec to String + match std::str::from_utf8(&username) { + Ok(name) => AuthId::Username(name.to_owned()), + Err(e) => { + tracing::error!("Error in extracting username {}", e); + AuthId::None + } + } + } + None => AuthId::None, + } + } +} diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index ed4890d7d2..57a5eb1602 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -31,6 +31,8 @@ use 
zenoh_protocol::{ }; use zenoh_result::ZResult; +#[cfg(feature = "auth_usrpwd")] +use super::ext::auth::UsrPwdId; #[cfg(feature = "shared-memory")] use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] @@ -111,6 +113,8 @@ struct RecvOpenSynOut { other_whatami: WhatAmI, other_lease: Duration, other_initial_sn: TransportSn, + #[cfg(feature = "auth_usrpwd")] + other_auth_id: UsrPwdId, } // OpenAck @@ -486,11 +490,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { } // Extension Auth - #[cfg(feature = "transport_auth")] - self.ext_auth + #[cfg(feature = "auth_usrpwd")] + let user_password_id = self + .ext_auth .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + .map_err(|e| (e, Some(close::reason::GENERIC)))? + .auth_id; // Extension MultiLink #[cfg(feature = "transport_multilink")] @@ -517,6 +523,8 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { other_whatami: cookie.whatami, other_lease: open_syn.lease, other_initial_sn: open_syn.initial_sn, + #[cfg(feature = "auth_usrpwd")] + other_auth_id: user_password_id, }; Ok((state, output)) } @@ -735,6 +743,8 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - false => None, }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), + #[cfg(feature = "auth_usrpwd")] + auth_id: osyn_out.other_auth_id, }; let a_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index 8d57434bc3..8b7125de6d 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -475,6 +475,11 @@ impl<'a> OpenFsm for &'a AuthFsm<'a> { /*************************************/ /* ACCEPT */ /*************************************/ +pub(crate) struct RecvOpenSynOut { + #[cfg(feature = "auth_usrpwd")] + pub(crate) 
auth_id: UsrPwdId, +} + #[async_trait] impl<'a> AcceptFsm for &'a AuthFsm<'a> { type Error = ZError; @@ -571,7 +576,9 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); - type RecvOpenSynOut = (); + + type RecvOpenSynOut = RecvOpenSynOut; + async fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -599,19 +606,27 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { } } + #[cfg(feature = "auth_usrpwd")] + let auth_id: UsrPwdId; + #[cfg(feature = "auth_usrpwd")] { match (self.usrpwd.as_ref(), state.usrpwd.as_mut()) { (Some(e), Some(s)) => { let x = ztake!(exts, id::USRPWD); - e.recv_open_syn((s, ztryinto!(x, S))).await?; + let username = e.recv_open_syn((s, ztryinto!(x, S))).await?; + auth_id = UsrPwdId(Some(username)); + } + (None, None) => { + auth_id = UsrPwdId(None); } - (None, None) => {} _ => bail!("{S} Invalid UsrPwd configuration."), } } - - Ok(()) + Ok(RecvOpenSynOut { + #[cfg(feature = "auth_usrpwd")] + auth_id, + }) } type SendOpenAckIn = &'a StateAccept; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index be24337fad..22d7a86817 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -162,6 +162,8 @@ impl StateOpen { pub(crate) struct StateAccept { nonce: u64, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct UsrPwdId(pub Option>); impl StateAccept { pub(crate) fn new(prng: &mut R) -> Self @@ -406,7 +408,7 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); - type RecvOpenSynOut = (); + type RecvOpenSynOut = Vec; //value of userid is returned if recvopensynout is processed as valid async fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -436,8 +438,8 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> { if hmac != open_syn.hmac { bail!("{S} Invalid password."); } - 
- Ok(()) + let username = open_syn.user.to_owned(); + Ok(username) } type SendOpenAckIn = &'a StateAccept; diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 2857d1cc75..9f6f2e61a7 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -32,6 +32,8 @@ use zenoh_result::ZResult; use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] use crate::shm::TransportShmConfig; +#[cfg(feature = "auth_usrpwd")] +use crate::unicast::establishment::ext::auth::UsrPwdId; use crate::{ common::batch::BatchConfig, unicast::{ @@ -644,6 +646,8 @@ pub(crate) async fn open_link( false => None, }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), + #[cfg(feature = "auth_usrpwd")] + auth_id: UsrPwdId(None), }; let o_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 749c5507aa..73d2e61398 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -32,6 +32,7 @@ use zenoh_result::{zerror, ZResult}; use crate::stats::TransportStats; use crate::{ unicast::{ + authentication::AuthId, link::{LinkUnicastWithOpenAck, TransportLinkUnicast}, transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, TransportConfigUnicast, @@ -187,6 +188,21 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { self.config.zid } + fn get_auth_ids(&self) -> Vec { + // Convert LinkUnicast auth id to AuthId + let mut auth_ids: Vec = vec![]; + let handle = tokio::runtime::Handle::current(); + let guard = + tokio::task::block_in_place(|| handle.block_on(async { zasyncread!(self.link) })); + if let Some(val) = guard.as_ref() { + auth_ids.push(val.link.get_auth_id().to_owned().into()); + } + // Convert usrpwd auth id to AuthId + #[cfg(feature = 
"auth_usrpwd")] + auth_ids.push(self.config.auth_id.clone().into()); + auth_ids + } + fn get_whatami(&self) -> WhatAmI { self.config.whatami } diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 04162de10a..4539135fe9 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -11,15 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod authentication; pub mod establishment; pub(crate) mod link; pub(crate) mod lowlatency; pub(crate) mod manager; -pub(crate) mod transport_unicast_inner; -pub(crate) mod universal; - #[cfg(feature = "test")] pub mod test_helpers; +pub(crate) mod transport_unicast_inner; +pub(crate) mod universal; use std::{ fmt, @@ -42,6 +42,9 @@ use self::transport_unicast_inner::TransportUnicastTrait; use super::{TransportPeer, TransportPeerEventHandler}; #[cfg(feature = "shared-memory")] use crate::shm::TransportShmConfig; +use crate::unicast::authentication::AuthId; +#[cfg(feature = "auth_usrpwd")] +use crate::unicast::establishment::ext::auth::UsrPwdId; /*************************************/ /* TRANSPORT UNICAST */ @@ -58,6 +61,8 @@ pub(crate) struct TransportConfigUnicast { #[cfg(feature = "shared-memory")] pub(crate) shm: Option, pub(crate) is_lowlatency: bool, + #[cfg(feature = "auth_usrpwd")] + pub(crate) auth_id: UsrPwdId, } /// [`TransportUnicast`] is the transport handler returned @@ -117,6 +122,11 @@ impl TransportUnicast { Ok(transport.get_links()) } + pub fn get_auth_ids(&self) -> ZResult> { + let transport = self.get_inner()?; + Ok(transport.get_auth_ids()) + } + #[inline(always)] pub fn schedule(&self, message: NetworkMessage) -> ZResult<()> { let transport = self.get_inner()?; diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index c7821aac9c..0ea3b979ca 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ 
b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -56,6 +56,7 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; fn get_links(&self) -> Vec; + fn get_auth_ids(&self) -> Vec; #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool; fn is_qos(&self) -> bool; diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 6a122d258e..eab047460f 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -33,6 +33,7 @@ use crate::stats::TransportStats; use crate::{ common::priority::{TransportPriorityRx, TransportPriorityTx}, unicast::{ + authentication::AuthId, link::{LinkUnicastWithOpenAck, TransportLinkUnicastDirection}, transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, universal::link::TransportLinkUnicastUniversal, @@ -381,6 +382,19 @@ impl TransportUnicastTrait for TransportUnicastUniversal { zread!(self.links).iter().map(|l| l.link.link()).collect() } + fn get_auth_ids(&self) -> Vec { + // Convert LinkUnicast auth ids to AuthId + #[allow(unused_mut)] + let mut auth_ids: Vec = zread!(self.links) + .iter() + .map(|l| l.link.link.get_auth_id().to_owned().into()) + .collect(); + // Convert usrpwd auth id to AuthId + #[cfg(feature = "auth_usrpwd")] + auth_ids.push(self.config.auth_id.clone().into()); + auth_ids + } + /*************************************/ /* TX */ /*************************************/ diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 559220e734..5ed4664f69 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -114,6 +114,7 @@ zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } zenoh-task = { workspace = true } +once_cell = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git 
a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index b237ac1b78..1e95104967 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -27,7 +27,10 @@ use zenoh_protocol::{ zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{authentication::AuthId, TransportUnicast}, +}; use super::{ authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, @@ -38,18 +41,20 @@ pub struct AclEnforcer { enforcer: Arc, } #[derive(Clone, Debug)] -pub struct Interface { +pub struct AuthSubject { id: usize, name: String, } + struct EgressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, + subject: Vec, zid: ZenohIdProto, } + struct IngressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, + subject: Vec, zid: ZenohIdProto, } @@ -81,17 +86,35 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { + let mut authn_ids = vec![]; + if let Ok(ids) = transport.get_auth_ids() { + for auth_id in ids { + match auth_id { + AuthId::CertCommonName(name) => { + let subject = &Subject::CertCommonName(name.clone()); + if let Some(val) = self.enforcer.subject_map.get(subject) { + authn_ids.push(AuthSubject { id: *val, name }); + } + } + AuthId::Username(name) => { + let subject = &Subject::Username(name.clone()); + if let Some(val) = self.enforcer.subject_map.get(subject) { + authn_ids.push(AuthSubject { id: *val, name }); + } + } + AuthId::None => {} + } + } + } match transport.get_zid() { Ok(zid) => { - let mut interface_list: Vec = Vec::new(); match transport.get_links() { Ok(links) => { for link in links { - let enforcer = self.enforcer.clone(); for face in link.interfaces { let subject = 
&Subject::Interface(face.clone()); - if let Some(val) = enforcer.subject_map.get(subject) { - interface_list.push(Interface { + if let Some(val) = self.enforcer.subject_map.get(subject) { + authn_ids.push(AuthSubject { id: *val, name: face, }); @@ -106,23 +129,24 @@ impl InterceptorFactoryTrait for AclEnforcer { } let ingress_interceptor = Box::new(IngressAclEnforcer { policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), zid, + subject: authn_ids.clone(), }); let egress_interceptor = Box::new(EgressAclEnforcer { policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), zid, + subject: authn_ids, }); - match ( - self.enforcer.interface_enabled.ingress, - self.enforcer.interface_enabled.egress, - ) { - (true, true) => (Some(ingress_interceptor), Some(egress_interceptor)), - (true, false) => (Some(ingress_interceptor), None), - (false, true) => (None, Some(egress_interceptor)), - (false, false) => (None, None), - } + ( + self.enforcer + .interface_enabled + .ingress + .then_some(ingress_interceptor), + self.enforcer + .interface_enabled + .egress + .then_some(egress_interceptor), + ) } Err(e) => { tracing::error!("Failed to get zid with error :{}", e); @@ -218,6 +242,7 @@ impl InterceptorTrait for EgressAclEnforcer { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { Some(Box::new(key_expr.to_string())) } + fn intercept( &self, ctx: RoutingContext, @@ -283,15 +308,15 @@ impl InterceptorTrait for EgressAclEnforcer { } pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; - fn interface_list(&self) -> Vec; fn zid(&self) -> ZenohIdProto; fn flow(&self) -> InterceptorFlow; + fn authn_ids(&self) -> Vec; fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); - let interface_list = self.interface_list(); + let authn_ids: Vec = self.authn_ids(); let zid = self.zid(); let mut decision = policy_enforcer.default_permission; - for 
subject in &interface_list { + for subject in &authn_ids { match policy_enforcer.policy_decision_point(subject.id, self.flow(), action, key_expr) { Ok(Permission::Allow) => { tracing::trace!( @@ -338,16 +363,17 @@ impl AclActionMethods for EgressAclEnforcer { self.policy_enforcer.clone() } - fn interface_list(&self) -> Vec { - self.interface_list.clone() - } - fn zid(&self) -> ZenohIdProto { self.zid } + fn flow(&self) -> InterceptorFlow { InterceptorFlow::Egress } + + fn authn_ids(&self) -> Vec { + self.subject.clone() + } } impl AclActionMethods for IngressAclEnforcer { @@ -355,14 +381,15 @@ impl AclActionMethods for IngressAclEnforcer { self.policy_enforcer.clone() } - fn interface_list(&self) -> Vec { - self.interface_list.clone() - } - fn zid(&self) -> ZenohIdProto { self.zid } + fn flow(&self) -> InterceptorFlow { InterceptorFlow::Ingress } + + fn authn_ids(&self) -> Vec { + self.subject.clone() + } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 4ff36b1ce3..283a02248b 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -157,25 +157,21 @@ impl PolicyEnforcer { } else { // check for undefined values in rules and initialize them to defaults for (rule_offset, rule) in rules.iter_mut().enumerate() { - match rule.interfaces { - Some(_) => (), - None => { - tracing::warn!("ACL config interfaces list is empty. Applying rule #{} to all network interfaces", rule_offset); - if let Ok(all_interfaces) = - get_interface_names_by_addr(Ipv4Addr::UNSPECIFIED.into()) - { - rule.interfaces = Some(all_interfaces); - } - } + if rule.interfaces.is_none() { + tracing::warn!("ACL config interfaces list is empty. 
Applying rule #{} to all network interfaces", rule_offset); + rule.interfaces = + Some(get_interface_names_by_addr(Ipv4Addr::UNSPECIFIED.into())?); } - match rule.flows { - Some(_) => (), - None => { - tracing::warn!("ACL config flows list is empty. Applying rule #{} to both Ingress and Egress flows", rule_offset); - rule.flows = Some( - [InterceptorFlow::Ingress, InterceptorFlow::Egress].into(), - ); - } + if rule.flows.is_none() { + tracing::warn!("ACL config flows list is empty. Applying rule #{} to both Ingress and Egress flows", rule_offset); + rule.flows = + Some([InterceptorFlow::Ingress, InterceptorFlow::Egress].into()); + } + if rule.usernames.is_none() { + rule.usernames = Some(Vec::new()); + } + if rule.cert_common_names.is_none() { + rule.cert_common_names = Some(Vec::new()); } } let policy_information = self.policy_information_point(&rules)?; @@ -227,11 +223,8 @@ impl PolicyEnforcer { ) -> ZResult { let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { - // config validation + // Config validation let mut validation_err = String::new(); - if config_rule.interfaces.as_ref().unwrap().is_empty() { - validation_err.push_str("ACL config interfaces list is empty. "); - } if config_rule.actions.is_empty() { validation_err.push_str("ACL config actions list is empty. "); } @@ -244,6 +237,28 @@ impl PolicyEnforcer { if !validation_err.is_empty() { bail!("{}", validation_err); } + + // At least one must not be empty + let mut subject_validation_err: usize = 0; + validation_err = String::new(); + + if config_rule.interfaces.as_ref().unwrap().is_empty() { + subject_validation_err += 1; + validation_err.push_str("ACL config interfaces list is empty. "); + } + if config_rule.cert_common_names.as_ref().unwrap().is_empty() { + subject_validation_err += 1; + validation_err.push_str("ACL config certificate common names list is empty. 
"); + } + if config_rule.usernames.as_ref().unwrap().is_empty() { + subject_validation_err += 1; + validation_err.push_str("ACL config usernames list is empty. "); + } + + if subject_validation_err == 3 { + bail!("{}", validation_err); + } + for subject in config_rule.interfaces.as_ref().unwrap() { if subject.trim().is_empty() { bail!("found an empty interface value in interfaces list"); @@ -265,10 +280,52 @@ impl PolicyEnforcer { } } } + for subject in config_rule.cert_common_names.as_ref().unwrap() { + if subject.trim().is_empty() { + bail!("found an empty value in certificate common names list"); + } + for flow in config_rule.flows.as_ref().unwrap() { + for action in &config_rule.actions { + for key_expr in &config_rule.key_exprs { + if key_expr.trim().is_empty() { + bail!("found an empty key-expression value in key_exprs list"); + } + policy_rules.push(PolicyRule { + subject: Subject::CertCommonName(subject.clone()), + key_expr: key_expr.clone(), + action: *action, + permission: config_rule.permission, + flow: *flow, + }) + } + } + } + } + for subject in config_rule.usernames.as_ref().unwrap() { + if subject.trim().is_empty() { + bail!("found an empty value in usernames list"); + } + for flow in config_rule.flows.as_ref().unwrap() { + for action in &config_rule.actions { + for key_expr in &config_rule.key_exprs { + if key_expr.trim().is_empty() { + bail!("found an empty key-expression value in key_exprs list"); + } + policy_rules.push(PolicyRule { + subject: Subject::Username(subject.clone()), + key_expr: key_expr.clone(), + action: *action, + permission: config_rule.permission, + flow: *flow, + }) + } + } + } + } } let mut subject_map = SubjectMap::default(); let mut counter = 1; - //starting at 1 since 0 is the init value and should not match anything + // Starting at 1 since 0 is the init value and should not match anything for rule in policy_rules.iter() { if !subject_map.contains_key(&rule.subject) { subject_map.insert(rule.subject.clone(), counter); @@ 
-281,10 +338,9 @@ impl PolicyEnforcer { }) } - /* - checks each msg against the ACL ruleset for allow/deny - */ - + /** + * Check each msg against the ACL ruleset for allow/deny + */ pub fn policy_decision_point( &self, subject: usize, @@ -293,6 +349,9 @@ impl PolicyEnforcer { key_expr: &str, ) -> ZResult { let policy_map = &self.policy_map; + if policy_map.is_empty() { + return Ok(self.default_permission); + } match policy_map.get(&subject) { Some(single_policy) => { let deny_result = single_policy diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 6086a048ee..bbadd0dcf3 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -47,7 +47,7 @@ mod test { async fn get_basic_router_config() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; + config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); config } @@ -59,9 +59,9 @@ mod test { async fn get_client_sessions() -> (Session, Session) { println!("Opening client sessions"); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs new file mode 100644 index 0000000000..f8dcf74bc4 --- /dev/null +++ b/zenoh/tests/authentication.rs @@ -0,0 +1,1369 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the 
Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +mod test { + use std::{ + fs, + path::PathBuf, + sync::{Arc, Mutex}, + time::Duration, + }; + + use once_cell::sync::Lazy; + use tokio::runtime::Handle; + use zenoh::{ + config, + config::{EndPoint, WhatAmI}, + prelude::*, + Config, Session, + }; + use zenoh_core::{zlock, ztimeout}; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + static TESTFILES_PATH: Lazy = Lazy::new(std::env::temp_dir); + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + println!("testfiles created successfully."); + + test_pub_sub_deny_then_allow_usrpswd().await; + test_pub_sub_allow_then_deny_usrpswd().await; + test_get_qbl_allow_then_deny_usrpswd().await; + test_get_qbl_deny_then_allow_usrpswd().await; + + test_pub_sub_deny_then_allow_tls(3774).await; + test_pub_sub_allow_then_deny_tls(3775).await; + test_get_qbl_allow_then_deny_tls(3776).await; + test_get_qbl_deny_then_allow_tls(3777).await; + + test_pub_sub_deny_then_allow_quic(3774, false).await; + test_pub_sub_allow_then_deny_quic(3775).await; + test_get_qbl_deny_then_allow_quic(3776).await; + test_get_qbl_allow_then_deny_quic(3777).await; + + // Test link AuthIds accessibility for lowlatency transport + test_pub_sub_deny_then_allow_quic(3778, true).await; + } + + #[allow(clippy::all)] + async fn create_new_files(certs_dir: std::path::PathBuf) -> std::io::Result<()> { + use std::io::prelude::*; + let ca_pem = b"-----BEGIN CERTIFICATE----- +MIIDiTCCAnGgAwIBAgIUO1x6LAlICgKs5+pYUTo4CughfKEwDQYJKoZIhvcNAQEL 
+BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTExNDM0MjNaFw0yNTAzMTExNDM0MjNaMFQxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRgwFgYDVQQDDA96 +c190ZXN0X3Jvb3RfY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3 +pFWM+IJNsRCYHt1v/TliecppwVZV+ZHfFw9JKN9ev4K/fWHUiAOwp91MOLxbaYKd +C6dxW28YVGltoGz3kUZJZcJRQVso1jXv24Op4muOsiYXukLc4TU2F6dG1XqkLt5t +svsYAQFf1uK3//QZFVRBosJEn+jjiJ4XCvt49mnPRolp1pNKX0z31mZO6bSly6c9 +OVlJMjWpDCYSOuf6qZZ36fa9eSut2bRJIPY0QCsgnqYBTnIEhksS+3jy6Qt+QpLz +95pFdLbW/MW4XKpaDltyYkO6QrBekF6uWRlvyAHU+NqvXZ4F/3Z5l26qLuBcsLPJ +kyawkO+yNIDxORmQgMczAgMBAAGjUzBRMB0GA1UdDgQWBBThgotd9ws2ryEEaKp2 ++RMOWV8D7jAfBgNVHSMEGDAWgBThgotd9ws2ryEEaKp2+RMOWV8D7jAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA9QoPv78hGmvmqF4GZeqrOBKQB +N/H5wL7f8H6BXU/wpNo2nnWOJn3u37lT+zivAdGEv+x+GeKekcugKBCSluhBLpVb +VNXe4WwMm5FBuO2NRBN2nblTMm1kEO00nVk1/yNo4hI8mj7d4YLU62d7324osNpF +wHqu6B0/c99JeKRvODGswyff1i8rJ1jpcgk/JmHg7UQBHEIkn0cRR0f9W3Mxv6b5 +ZeowRe81neWNkC6IMiMmzA0iHGkhoUMA15qG1ZKOr1XR364LH5BfNNpzAWYwkvJs +0JFrrdw+rm+cRJWs55yiyCCs7pyg1IJkY/o8bifdCOUgIyonzffwREk3+kZR +-----END CERTIFICATE-----"; + + let client_side_pem = b"-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpkwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTkxMTMxNDhaFw0yNTAzMTkxMTMxNDhaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtj +bGllbnRfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzU2p1a +ly/1bi2TDZ8+Qlvk9/3KyHqrg2BGZUxB3Pj/lufDuYNwOHkss99wp8gzMsT28mD4 +y6X7nCgEN8WeHl+/xfLuGsWIBa1OOr6dz0qewoWFsor01cQ8+nwAKlgnz6IvHfkQ +OJZD/QYSdyn6c1AcIyS60vo4qMjyI4OVb1Dl4WpC4vCmWvDT0WjBZ5GckCnuQ8wS +wZ5MtPuMQf8kYX95ll7eBtDfEXF9Oja0l1/5SmlHuKyqDy4sIKovxtFHTqgb8PUc +yT33pUHOsBXruNBxl1MKq1outdMqcQknT6FAC+aVZ7bTlwhnH8p5Apn57g+dJYTI 
+9dCr1e2oK5NohhkCAwEAAaNaMFgwFgYDVR0RBA8wDYILY2xpZW50X3NpZGUwHQYD +VR0OBBYEFHDUYYfQacLj1tp49OG9NbPuL0N/MB8GA1UdIwQYMBaAFOGCi133Czav +IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQB+nFAe6QyD2AaFdgrFOyEE +MeYb97sy9p5ylhMYyU62AYsIzzpTY74wBG78qYPIw3lAYzNcN0L6T6kBQ4lu6gFm +XB0SqCZ2AkwvV8tTlbLkZeoO6rONeke6c8cJsxYN7NiknDvTMrkTTgiyvbCWfEVX +Htnc4j/KzSBX3UjVcbPM3L/6KwMRw050/6RCiOIPFjTOCfTGoDx5fIyBk3ch/Plw +TkH2juHxX0/aCxr8hRE1v9+pXXlGnGoKbsDMLN9Aziu6xzdT/kD7BvyoM8rh7CE5 +ae7/R4sd13cZ2WGDPimqO0z1kItMOIdiYvk4DgOg+J8hZSkKT56erafdDa2LPBE6 +-----END CERTIFICATE-----"; + + let client_side_key = b"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDM1NqdWpcv9W4t +kw2fPkJb5Pf9ysh6q4NgRmVMQdz4/5bnw7mDcDh5LLPfcKfIMzLE9vJg+Mul+5wo +BDfFnh5fv8Xy7hrFiAWtTjq+nc9KnsKFhbKK9NXEPPp8ACpYJ8+iLx35EDiWQ/0G +Encp+nNQHCMkutL6OKjI8iODlW9Q5eFqQuLwplrw09FowWeRnJAp7kPMEsGeTLT7 +jEH/JGF/eZZe3gbQ3xFxfTo2tJdf+UppR7isqg8uLCCqL8bRR06oG/D1HMk996VB +zrAV67jQcZdTCqtaLrXTKnEJJ0+hQAvmlWe205cIZx/KeQKZ+e4PnSWEyPXQq9Xt +qCuTaIYZAgMBAAECggEAAlqVVw7UEzLjtN4eX1S6tD3jvCzFBETdjgENF7TfjlR4 +lln9UyV6Xqkc+Y28vdwZwqHwW90sEPCc5ShUQD7+jBzi8FVcZSX4o7rVCbz8RXgg +1eI5EKf632YQflWNpwTxGcTnGCY/sjleil/yst6sDdD+9eR4OXQme2Wt8wyH8pLm +bf1OensGrFu3kJaPMOfP6jXnqEqkUPqmaCNW7+Ans8E+4J9oksRVPQJEuxwSjdJu +BlG50KKpl0XwZ/u/hkkj8/BlRDa62YMGJkFOwaaGUu2/0UU139XaJiMSPoL6t/BU +1H15dtW9liEtnHIssXMRzc9cg+xPgCs79ABXSZaFUQKBgQD4mH/DcEFwkZQcr08i +GUk0RE5arAqHui4eiujcPZVV6j/L7PHHmabKRPBlsndFP7KUCtvzNRmHq7JWDkpF +S36OE4e94CBYb0CIrO8OO5zl1vGAn5qa9ckefSFz9AMWW+hSuo185hFjt67BMaI0 +8CxfYDH+QY5D4JE5RhSwsOmiUQKBgQDS7qjq+MQKPHHTztyHK8IbAfEGlrBdCAjf +K1bDX2BdfbRJMZ+y8LgK5HxDPlNx2/VauBLsIyU1Zirepd8hOsbCVoK1fOq+T7bY +KdB1oqLK1Rq1sMBc26F24LBaZ3Pw5XgYEcvaOW0JFQ9Oc4VjcIXKjTNhobNOegfK +QDnw8fEtSQKBgQDrCuTh2GVHFZ3AcVCUoOvB60NaH4flRHcOkbARbHihvtWK7gC8 +A97bJ8tTnCWA5/TkXFAR54a36/K1wtUeJ38Evhp9wEdU1ftiPn/YKSzzcwLr5fu7 +v9/kX9MdWv0ASu2iKphUGwMeETG9oDwJaXvKwZ0DFOB59P3Z9RTi6qI7wQKBgQCp +uBZ6WgeDJPeBsaSHrpHUIU/KOV1WvaxFxR1evlNPZmG1sxQIat/rA8VoZbHGn3Ff 
+uVSgY/cAbGB6HYTXu+9JV0p8tTI8Ru+cJqjwvhe2lJmVL87X6HCWsluzoiIL5tcm +pssbn7E36ZYTTag6RsOgItUA7ZbUwiOafOsiD8o64QKBgE6nOkAfy5mbp7X+q9uD +J5y6IXpY/Oia/RwveLWFbI/aum4Nnhb6L9Y0XlrYjm4cJOchQyDR7FF6f4EuAiYb +wdxBbkxXpwXnfKCtNvMF/wZMvPVaS5HTQga8hXMrtlW6jtTJ4HmkTTB/MILAXVkJ +EHi+N70PcrYg6li415TGfgDz +-----END PRIVATE KEY-----"; + + let server_side_pem = b"-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpgwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTkxMTMxMDRaFw0yNTAzMTkxMTMxMDRaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtz +ZXJ2ZXJfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKw4eKzt +T1inzuEIPBaPksWyjoD9n6uJx9jAQ2wRB6rXiAsXVLRSuczdGDpb1MwAqoIi6ozw +tzDRwkr58vUNaTCswxadlAmB44JEVYKZoublHjlVj5ygr0R4R5F2T9tIV+jpqZuK +HR4dHe8PiDCiWVzWvYwOLVKXQKSeaE2Z143ukVIJ85qmNykJ066AVhgWnIYSCR9c +s7WPBdTWAW3L4yNlast9hfvxdQNDs5AtUnJKfAX+7DylPAm8V7YjU1k9AtTNPbpy +kb9X97ErsB8891MmZaGZp0J6tnuucDkk0dlowMVvi2aUCsYoKF5DgGxtyVAeLhTP +70GenaLe2uwG8fMCAwEAAaNaMFgwFgYDVR0RBA8wDYILc2VydmVyX3NpZGUwHQYD +VR0OBBYEFBKms1sOw8nM/O5SN1EZIH+LsWaPMB8GA1UdIwQYMBaAFOGCi133Czav +IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQA6H/sfm8YUn86+GwxNR9i9 +MCL7WHVRx3gS9ENK87+HtZNL2TVvhPJtupG3Pjgqi33FOHrM4rMUcWSZeCEycVgy +5cjimQLwfDljIBRQE6sem3gKf0obdWl5AlPDLTL/iKj5Su7NycrjZFYqkjZjn+58 +fe8lzHNeP/3RQTgjJ98lQI0bdzGDG1+QoxTgPEc77vgN0P4MHJYx2auz/7jYBqNJ +ko8nugIQsd4kOhmOIBUQ8aXkXFktSQIerEGB8uw5iF2cCdH/sTCvhzhxLb4IWo/O +0cAZ+Vs4FW3KUn/Y44yrVAWl1H6xdFsNXBqbzVEMzlt/RV3rH70RDCc20XhP+w+g +-----END CERTIFICATE-----"; + + let server_side_key = b"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsOHis7U9Yp87h +CDwWj5LFso6A/Z+ricfYwENsEQeq14gLF1S0UrnM3Rg6W9TMAKqCIuqM8Lcw0cJK ++fL1DWkwrMMWnZQJgeOCRFWCmaLm5R45VY+coK9EeEeRdk/bSFfo6ambih0eHR3v +D4gwollc1r2MDi1Sl0CknmhNmdeN7pFSCfOapjcpCdOugFYYFpyGEgkfXLO1jwXU 
+1gFty+MjZWrLfYX78XUDQ7OQLVJySnwF/uw8pTwJvFe2I1NZPQLUzT26cpG/V/ex +K7AfPPdTJmWhmadCerZ7rnA5JNHZaMDFb4tmlArGKCheQ4BsbclQHi4Uz+9Bnp2i +3trsBvHzAgMBAAECggEAUjpIS/CmkOLWYRVoczEr197QMYBnCyUm2TO7PU7IRWbR +GtKR6+MPuWPbHIoaCSlMQARhztdj8BhG1zuOKDi1/7qNDzA/rWZp9RmhZlDquamt +i5xxjEwgQuXW7fn6WO2qo5dlFtGT43vtfeYBlY7+cdhJ+iQOub9j6vWDQYHxrF7x +yM8xvNzomHThvLFzWXJV/nGjX5pqPraMmwJUW+MGX0YaEr6tClqsc1Kmxhs3iIUo +1JCqh3FpVu2i/mR9fdcQ0ONT/s1UHzy+1Bhmh3j2Fuk4+ZeLMfxTfFxk5U0BeMQY +sES3qmd+pG5iqPW+AmXy299G89jf5+1Q4J2Km5KOUQKBgQDidifoeknpi9hRHLLD +w/7KMMe8yYg3c3dv5p0iUQQ2pXd1lJIFQ+B2/D+hfOXhnN/iCDap89ll2LoQ2Q9L +38kQXH06HCM2q11RP0BEsZCG0CnluS+JVNnjs/ALi+yc4HSpzKPs3zXIC3dLOUbq +ov5Esa5h/RU6+NO+DH72TWTv6wKBgQDCryPKtOcLp1eqdwIBRoXdPZeUdZdnwT8+ +70DnC+YdOjFkqTbaoYE5ePa3ziGOZyTFhJbPgiwEdj9Ez1JSgqLLv5hBc4s6FigK +D7fOnn7Q7+al/kEW7+X5yoSl1bFuPCqGL1xxzxmpDY8Gf3nyZ+QGfWIenbk3nq12 +nTgINyWMGQKBgQDSrxBDxXl8EMGH/MYHQRGKs8UvSuMyi3bjoU4w/eSInno75qPO +yC5NJDJin9sSgar8E54fkSCBExdP01DayvC5CwLqDAFqvBTOIKU/A18tPP6tnRKv +lkQ8Bkxdwai47k07J4qeNa9IU/qA/mGOq2MZL6DHwvd8bMA5gFCh/rDYTwKBgAPm +gGASScK5Ao+evMKLyCjLkBrgVD026O542qMGYQDa5pxuq3Or4qvlGYRLM+7ncBwo +8OCNahZYzCGzyaFvjpVobEN7biGmyfyRngwcrsu+0q8mreUov0HG5etwoZJk0DFK +B58cGBaD+AaYTTgnDrF2l52naUuM+Uq0EahQeocZAoGBAMJEGUFyEdm1JATkNhBv +ruDzj07PCjdvq3lUJix2ZlKlabsi5V+oYxMmrUSU8Nkaxy6O+qETNRNWQeWbPQHL +IZx/qrP32PmWX0IVj3pbfKHQSpOKNGzL9xUJ/FIycZWyT3yGf24KBuJwIx7xSrRx +qNsoty1gY/y3n7SN/iMZo8lO +-----END PRIVATE KEY-----"; + + let credentials_txt = b"client1name:client1passwd +client2name:client2passwd"; + + struct Testfile<'a> { + name: &'a str, + value: &'a [u8], + } + + let test_files = vec![ + Testfile { + name: "ca.pem", + value: ca_pem, + }, + Testfile { + name: "clientsidekey.pem", + value: client_side_key, + }, + Testfile { + name: "clientside.pem", + value: client_side_pem, + }, + Testfile { + name: "serversidekey.pem", + value: server_side_key, + }, + Testfile { + name: "serverside.pem", + value: server_side_pem, + }, + Testfile { + name: "credentials.txt", + 
value: credentials_txt, + }, + ]; + for test_file in test_files { + let file_path = certs_dir.join(test_file.name); + let mut file = fs::File::create(&file_path)?; + file.write_all(test_file.value)?; + } + + Ok(()) + } + + async fn get_basic_router_config_tls(port: u16) -> Config { + let cert_path = TESTFILES_PATH.to_string_lossy(); + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_server_private_key(Some(format!("{}/serversidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_server_certificate(Some(format!("{}/serverside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config + } + async fn get_basic_router_config_quic(port: u16, lowlatency: bool) -> Config { + let cert_path = TESTFILES_PATH.to_string_lossy(); + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_server_private_key(Some(format!("{}/serversidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_server_certificate(Some(format!("{}/serverside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + 
.set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + config + } + + async fn get_basic_router_config_usrpswd() -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "routername", + password: "routerpasswd", + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .auth + .usrpwd + .set_dictionary_file(Some(format!( + "{}/credentials.txt", + TESTFILES_PATH.to_string_lossy() + ))) + .unwrap(); + config + } + async fn close_router_session(s: Session) { + println!("Closing router session"); + ztimeout!(s.close()).unwrap(); + } + + async fn get_client_sessions_tls(port: u16) -> (Session, Session) { + let cert_path = TESTFILES_PATH.to_string_lossy(); + println!("Opening client sessions"); + let mut config = config::client([format!("tls/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + + let mut config = config::client([format!("tls/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + 
"protocols": [ + "tls" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_quic(port: u16, lowlatency: bool) -> (Session, Session) { + let cert_path = TESTFILES_PATH.to_string_lossy(); + println!("Opening client sessions"); + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", 
cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_usrpswd() -> (Session, Session) { + println!("Opening client sessions"); + let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "client1name", + password: "client1passwd", + }, + } + }"#, + ) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "client2name", + password: "client2passwd", + }, + } + }"#, + ) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn close_sessions(s01: Session, s02: Session) { + println!("Closing client sessions"); + ztimeout!(s01.close()).unwrap(); + ztimeout!(s02.close()).unwrap(); + } + + async fn test_pub_sub_deny_then_allow_tls(port: u16) { + println!("test_pub_sub_deny_then_allow_tls"); + + let mut config_router = get_basic_router_config_tls(port).await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["ingress","egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let 
(sub_session, pub_session) = get_client_sessions_tls(port).await; + { + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_allow_then_deny_tls(port: u16) { + println!("test_pub_sub_allow_then_deny_tls"); + let mut config_router = get_basic_router_config_tls(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_tls(port).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + 
tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_deny_then_allow_tls(port: u16) { + println!("test_get_qbl_deny_then_allow_tls"); + + let mut config_router = get_basic_router_config_tls(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_tls(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_allow_then_deny_tls(port: u16) { + println!("test_get_qbl_allow_then_deny_tls"); + + let mut config_router = 
get_basic_router_config_tls(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_tls(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_deny_then_allow_quic(port: u16, lowlatency: bool) { + println!("test_pub_sub_deny_then_allow_quic"); + + let mut config_router = get_basic_router_config_quic(port, lowlatency).await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["ingress","egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + 
"client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (sub_session, pub_session) = get_client_sessions_quic(port, lowlatency).await; + { + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + #[allow(unused)] + async fn test_pub_sub_allow_then_deny_quic(port: u16) { + println!("test_pub_sub_allow_then_deny_quic"); + + let mut config_router = get_basic_router_config_quic(port, false).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_quic(port, false).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value 
= zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + #[allow(unused)] + async fn test_get_qbl_deny_then_allow_quic(port: u16) { + println!("test_get_qbl_deny_then_allow_quic"); + + let mut config_router = get_basic_router_config_quic(port, false).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable"], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_quic(port, false).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + 
close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + #[allow(unused)] + async fn test_get_qbl_allow_then_deny_quic(port: u16) { + println!("test_get_qbl_allow_then_deny_quic"); + + let mut config_router = get_basic_router_config_quic(port, false).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": + [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "cert_common_names": [ + "client_side" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_quic(port, false).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_deny_then_allow_usrpswd() { + println!("test_pub_sub_deny_then_allow_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": 
true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["ingress","egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (sub_session, pub_session) = get_client_sessions_usrpswd().await; + { + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_allow_then_deny_usrpswd() { + println!("test_pub_sub_allow_then_deny_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_usrpswd().await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = 
Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_deny_then_allow_usrpswd() { + println!("test_get_qbl_deny_then_allow_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_usrpswd().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + 
.unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_get_qbl_allow_then_deny_usrpswd() { + println!("test_get_qbl_allow_then_deny_usrpswd"); + + let mut config_router = get_basic_router_config_usrpswd().await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + "usernames": [ + "client1name", + "client2name" + ] + }, + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions_usrpswd().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!( + "Error : {}", + e.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } +} From e9612346469503eee4d850f714b07205c7a8657a Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 18 Jun 2024 
12:35:03 +0200 Subject: [PATCH 480/598] refactor: remove outdated `no_std` part of zenoh_util (#1164) * refactor: remove outdated `no_std` part of zenoh_util * fix: remove useless `const_format` dependency --- Cargo.lock | 3 +- commons/zenoh-util/Cargo.toml | 7 +-- .../zenoh-util/src/{std_only => }/ffi/mod.rs | 0 .../zenoh-util/src/{std_only => }/ffi/win.rs | 0 commons/zenoh-util/src/lib.rs | 59 ++++++++++++++----- .../src/{std_only => }/lib_loader.rs | 0 commons/zenoh-util/src/{std_only => }/log.rs | 0 .../zenoh-util/src/{std_only => }/net/mod.rs | 0 commons/zenoh-util/src/std_only/mod.rs | 35 ----------- .../src/{std_only => }/time_range.rs | 0 .../zenoh-util/src/{std_only => }/timer.rs | 0 plugins/zenoh-plugin-trait/Cargo.toml | 5 +- zenoh/Cargo.toml | 1 - 13 files changed, 50 insertions(+), 60 deletions(-) rename commons/zenoh-util/src/{std_only => }/ffi/mod.rs (100%) rename commons/zenoh-util/src/{std_only => }/ffi/win.rs (100%) rename commons/zenoh-util/src/{std_only => }/lib_loader.rs (100%) rename commons/zenoh-util/src/{std_only => }/log.rs (100%) rename commons/zenoh-util/src/{std_only => }/net/mod.rs (100%) delete mode 100644 commons/zenoh-util/src/std_only/mod.rs rename commons/zenoh-util/src/{std_only => }/time_range.rs (100%) rename commons/zenoh-util/src/{std_only => }/timer.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 74b0d39344..a6e0754166 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5188,7 +5188,6 @@ dependencies = [ "ahash", "async-trait", "base64 0.21.4", - "const_format", "event-listener 4.0.0", "flume", "form_urlencoded", @@ -5721,7 +5720,6 @@ dependencies = [ name = "zenoh-plugin-trait" version = "0.11.0-dev" dependencies = [ - "const_format", "libloading", "serde", "serde_json", @@ -5861,6 +5859,7 @@ version = "0.11.0-dev" dependencies = [ "async-std", "async-trait", + "const_format", "flume", "home", "humantime", diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index 48bbb17d04..e41433b85f 100644 --- 
a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -32,9 +32,7 @@ description = "Internal crate for zenoh." maintenance = { status = "actively-developed" } [features] -std = [] test = [] -default = ["std"] [dependencies] async-std = { workspace = true, features = ["default", "unstable"] } @@ -45,11 +43,12 @@ home = { workspace = true } humantime = { workspace = true } lazy_static = { workspace = true } libloading = { workspace = true } -tracing = {workspace = true} -tracing-subscriber = {workspace = true} +tracing = { workspace = true } +tracing-subscriber = { workspace = true } shellexpand = { workspace = true } zenoh-core = { workspace = true } zenoh-result = { workspace = true, features = ["default"] } +const_format = { workspace = true } [target.'cfg(windows)'.dependencies] winapi = { workspace = true } diff --git a/commons/zenoh-util/src/std_only/ffi/mod.rs b/commons/zenoh-util/src/ffi/mod.rs similarity index 100% rename from commons/zenoh-util/src/std_only/ffi/mod.rs rename to commons/zenoh-util/src/ffi/mod.rs diff --git a/commons/zenoh-util/src/std_only/ffi/win.rs b/commons/zenoh-util/src/ffi/win.rs similarity index 100% rename from commons/zenoh-util/src/std_only/ffi/win.rs rename to commons/zenoh-util/src/ffi/win.rs diff --git a/commons/zenoh-util/src/lib.rs b/commons/zenoh-util/src/lib.rs index 4b5da75548..7b5bb2e592 100644 --- a/commons/zenoh-util/src/lib.rs +++ b/commons/zenoh-util/src/lib.rs @@ -17,25 +17,54 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -#![cfg_attr(not(feature = "std"), no_std)] -extern crate alloc; -#[cfg_attr(feature = "std", macro_use)] -extern crate lazy_static; +use lazy_static::lazy_static; +pub mod ffi; +mod lib_loader; +pub mod net; +pub mod time_range; + +pub use lib_loader::*; +pub mod timer; +pub use timer::*; +pub mod log; +pub use log::*; + +/// The "ZENOH_HOME" environement variable name +pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; + +const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; + +/// Return the path to the ${ZENOH_HOME} directory (~/.zenoh by default). +pub fn zenoh_home() -> &'static std::path::Path { + use std::path::PathBuf; + lazy_static! { + static ref ROOT: PathBuf = { + if let Some(dir) = std::env::var_os(ZENOH_HOME_ENV_VAR) { + PathBuf::from(dir) + } else { + match home::home_dir() { + Some(mut dir) => { + dir.push(DEFAULT_ZENOH_HOME_DIRNAME); + dir + } + None => PathBuf::from(DEFAULT_ZENOH_HOME_DIRNAME), + } + } + }; + } + ROOT.as_path() +} + +#[doc(hidden)] +pub use const_format::concatcp as __concatcp; #[macro_export] macro_rules! 
concat_enabled_features { - (prefix = $prefix:literal, features = [$($feature:literal),*]) => { + (prefix = $prefix:literal, features = [$($feature:literal),* $(,)?]) => { { - use const_format::concatcp; - concatcp!("" $(, - if cfg!(feature = $feature) { concatcp!(" ", concatcp!($prefix, "/", $feature)) } else { "" } - )*) + $crate::__concatcp!($( + if cfg!(feature = $feature) { $crate::__concatcp!(" ", $prefix, "/", $feature) } else { "" } + ),*) } }; } - -#[cfg(feature = "std")] -mod std_only; - -#[cfg(feature = "std")] -pub use std_only::*; diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/lib_loader.rs similarity index 100% rename from commons/zenoh-util/src/std_only/lib_loader.rs rename to commons/zenoh-util/src/lib_loader.rs diff --git a/commons/zenoh-util/src/std_only/log.rs b/commons/zenoh-util/src/log.rs similarity index 100% rename from commons/zenoh-util/src/std_only/log.rs rename to commons/zenoh-util/src/log.rs diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/net/mod.rs similarity index 100% rename from commons/zenoh-util/src/std_only/net/mod.rs rename to commons/zenoh-util/src/net/mod.rs diff --git a/commons/zenoh-util/src/std_only/mod.rs b/commons/zenoh-util/src/std_only/mod.rs deleted file mode 100644 index 1cb406374c..0000000000 --- a/commons/zenoh-util/src/std_only/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -pub mod ffi; -mod lib_loader; -pub mod net; -pub mod time_range; -pub use lib_loader::*; -pub mod timer; -pub use timer::*; -pub mod log; -pub use log::*; - -/// The "ZENOH_HOME" environement variable name -pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; - -const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; - -/// Return the path to the ${ZENOH_HOME} directory (~/.zenoh by default). -pub fn zenoh_home() -> &'static std::path::Path { - use std::path::PathBuf; - lazy_static! 
{ - static ref ROOT: PathBuf = { - if let Some(dir) = std::env::var_os(ZENOH_HOME_ENV_VAR) { - PathBuf::from(dir) - } else { - match home::home_dir() { - Some(mut dir) => { - dir.push(DEFAULT_ZENOH_HOME_DIRNAME); - dir - } - None => PathBuf::from(DEFAULT_ZENOH_HOME_DIRNAME), - } - } - }; - } - ROOT.as_path() -} diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/time_range.rs similarity index 100% rename from commons/zenoh-util/src/std_only/time_range.rs rename to commons/zenoh-util/src/time_range.rs diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/timer.rs similarity index 100% rename from commons/zenoh-util/src/std_only/timer.rs rename to commons/zenoh-util/src/timer.rs diff --git a/plugins/zenoh-plugin-trait/Cargo.toml b/plugins/zenoh-plugin-trait/Cargo.toml index b184f5f4e9..8a355f6e47 100644 --- a/plugins/zenoh-plugin-trait/Cargo.toml +++ b/plugins/zenoh-plugin-trait/Cargo.toml @@ -28,11 +28,10 @@ name = "zenoh_plugin_trait" [dependencies] libloading = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } zenoh-macros = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } -zenoh-keyexpr = { workspace = true, features = ["internal", "unstable"] } -const_format = { workspace = true } \ No newline at end of file +zenoh-keyexpr = { workspace = true, features = ["internal", "unstable"] } \ No newline at end of file diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 5ed4664f69..7961c787eb 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -71,7 +71,6 @@ tokio-util = { workspace = true } ahash = { workspace = true } async-trait = { workspace = true } base64 = { workspace = true } -const_format = { workspace = true } event-listener = { workspace = true } flume = { workspace = true } form_urlencoded = { workspace = true } From 86490c17e2ad8977b14e98b33f7b42482f75c433 Mon 
Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 18 Jun 2024 14:30:11 +0200 Subject: [PATCH 481/598] feat: add enabled callback to `init_log_with_callback` (#1162) --- commons/zenoh-util/src/log.rs | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/commons/zenoh-util/src/log.rs b/commons/zenoh-util/src/log.rs index d9498b9cb3..67f1a45df7 100644 --- a/commons/zenoh-util/src/log.rs +++ b/commons/zenoh-util/src/log.rs @@ -13,7 +13,7 @@ // use std::{fmt, thread, thread::ThreadId}; -use tracing::{field::Field, span, Event, Subscriber}; +use tracing::{field::Field, span, Event, Metadata, Subscriber}; use tracing_subscriber::{ layer::{Context, SubscriberExt}, registry::LookupSpan, @@ -76,13 +76,21 @@ pub struct LogRecord { #[derive(Clone)] struct SpanFields(Vec<(&'static str, String)>); -struct Layer(F); +struct Layer { + enabled: Enabled, + callback: Callback, +} -impl tracing_subscriber::Layer for Layer +impl tracing_subscriber::Layer for Layer where S: Subscriber + for<'a> LookupSpan<'a>, - F: Fn(LogRecord) + 'static, + E: Fn(&Metadata) -> bool + 'static, + C: Fn(LogRecord) + 'static, { + fn enabled(&self, metadata: &Metadata<'_>, _: Context<'_, S>) -> bool { + (self.enabled)(metadata) + } + fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) { let span = ctx.span(id).unwrap(); let mut extensions = span.extensions_mut(); @@ -92,6 +100,7 @@ where }); extensions.insert(SpanFields(fields)); } + fn on_record(&self, id: &span::Id, values: &span::Record<'_>, ctx: Context<'_, S>) { let span = ctx.span(id).unwrap(); let mut extensions = span.extensions_mut(); @@ -100,6 +109,7 @@ where fields.0.push((field.name(), format!("{value:?}"))) }); } + fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { let thread = thread::current(); let mut record = LogRecord { @@ -126,12 +136,15 @@ where record.attributes.push((field.name(), format!("{value:?}"))) } }); - self.0(record); + 
(self.callback)(record); } } -pub fn init_log_with_callback(cb: impl Fn(LogRecord) + Send + Sync + 'static) { - let subscriber = tracing_subscriber::registry().with(Layer(cb)); +pub fn init_log_with_callback( + enabled: impl Fn(&Metadata) -> bool + Send + Sync + 'static, + callback: impl Fn(LogRecord) + Send + Sync + 'static, +) { + let subscriber = tracing_subscriber::registry().with(Layer { enabled, callback }); let _ = tracing::subscriber::set_global_default(subscriber); } From e543dce904b1e4d01e0b7078974a7c9259e8295c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 19 Jun 2024 10:16:25 +0200 Subject: [PATCH 482/598] Add remaining method to ZBytesReader (#1168) --- zenoh/src/api/bytes.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 27cfdc3e3f..d911fc8b65 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -22,7 +22,7 @@ use uhlc::Timestamp; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, - reader::HasReader, + reader::{HasReader, Reader}, writer::HasWriter, ZBuf, ZBufReader, ZBufWriter, ZSlice, }; @@ -185,6 +185,16 @@ impl ZBytes { pub struct ZBytesReader<'a>(ZBufReader<'a>); impl ZBytesReader<'_> { + /// Returns the number of bytes that can still be read + pub fn remaining(&self) -> usize { + self.0.remaining() + } + + /// Returns true if no more bytes can be read + pub fn is_empty(&self) -> bool { + self.remaining() == 0 + } + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize(&mut self) -> ZResult where From a612fd1d93e0dc832eaa9ce13b05e21e8b6013ad Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 19 Jun 2024 11:03:11 +0200 Subject: [PATCH 483/598] Merge/into/dev/1.0.0/from/main (#1169) * Add NOTE for LowLatency transport. 
(#1088) Signed-off-by: ChenYing Kuo * fix: Improve debug messages in `zenoh-transport` (#1090) * fix: Improve debug messages for failing RX/TX tasks * fix: Improve debug message for `accept_link` timeout * chore: Fix `clippy::redundant_pattern_matching` error * Improve pipeline backoff (#1097) * Yield task for backoff * Improve comments and error handling in backoff * Simplify pipeline pull * Consider backoff configuration * Add typos check to CI (#1065) * Fix typos * Add typos check to CI * Start link tx_task before notifying router (#1098) * Fix typos (#1110) * bump quinn & rustls (#1086) * bump quinn & rustls * fix ci windows check * add comments * Fix interface name scanning when listening on IP unspecified for TCP/TLS/QUIC/WS (#1123) Co-authored-by: Julien Enoch * Enable releasing from any branch (#1136) * Fix cargo clippy (#1145) * Release tables locks before propagating subscribers and queryables declarations to void dead locks (#1150) * Send simple sub and qabl declarations using a given function * Send simple sub and qabl declarations after releasing tables lock * Send simple sub and qabl declarations after releasing tables lock (missing places) * Update async-io * Update base64 dependency * Update event-listener dependency * Update jsonschema dependency * Update keyed-set dependency * Update console-subscriber dependency * Update pnet dependency * Update rcgen dependency * Update tokio-tungstenite dependency * Update thread-priority dependency * Fix typos * Fix typos * Add Unicode-3.0 to allowed licenses --------- Signed-off-by: ChenYing Kuo Co-authored-by: ChenYing Kuo (CY) Co-authored-by: Mahmoud Mazouz Co-authored-by: Luca Cominardi Co-authored-by: Tavo Annus Co-authored-by: JLer Co-authored-by: Julien Enoch --- .github/workflows/ci.yml | 12 +- .github/workflows/release.yml | 5 + Cargo.lock | 656 ++++++++++++------ Cargo.toml | 34 +- DEFAULT_CONFIG.json5 | 20 +- README.md | 6 +- _typos.toml | 12 + commons/zenoh-buffers/src/zbuf.rs | 2 +- 
commons/zenoh-buffers/src/zslice.rs | 2 +- commons/zenoh-codec/src/core/zint.rs | 4 +- commons/zenoh-config/src/connection_retry.rs | 2 +- commons/zenoh-config/src/include.rs | 2 +- commons/zenoh-config/src/lib.rs | 14 +- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 12 +- .../zenoh-keyexpr/src/key_expr/format/mod.rs | 4 +- .../src/key_expr/intersect/classical.rs | 2 +- .../src/keyexpr_tree/arc_tree.rs | 4 +- .../src/keyexpr_tree/impls/keyed_set_impl.rs | 3 +- commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- .../src/keyexpr_tree/traits/mod.rs | 2 +- commons/zenoh-macros/src/lib.rs | 2 +- commons/zenoh-protocol/src/core/parameters.rs | 4 +- commons/zenoh-protocol/src/core/resolution.rs | 2 +- commons/zenoh-protocol/src/lib.rs | 2 +- .../zenoh-protocol/src/network/interest.rs | 2 +- commons/zenoh-protocol/src/transport/close.rs | 2 +- .../zenoh-protocol/src/transport/fragment.rs | 2 +- commons/zenoh-protocol/src/transport/frame.rs | 2 +- .../zenoh-protocol/src/transport/keepalive.rs | 2 +- commons/zenoh-protocol/src/transport/open.rs | 2 +- commons/zenoh-protocol/src/zenoh/err.rs | 2 +- commons/zenoh-protocol/src/zenoh/put.rs | 2 +- .../src/api/provider/shm_provider_backend.rs | 2 +- commons/zenoh-shm/src/api/provider/types.rs | 4 +- commons/zenoh-shm/src/lib.rs | 4 +- .../zenoh-shm/src/watchdog/periodic_task.rs | 6 +- commons/zenoh-shm/tests/common/mod.rs | 2 +- commons/zenoh-shm/tests/header.rs | 2 +- commons/zenoh-shm/tests/periodic_task.rs | 12 +- commons/zenoh-shm/tests/watchdog.rs | 2 +- commons/zenoh-sync/src/condition.rs | 6 +- commons/zenoh-util/src/lib.rs | 2 +- commons/zenoh-util/src/lib_loader.rs | 4 +- commons/zenoh-util/src/net/mod.rs | 12 +- commons/zenoh-util/src/time_range.rs | 4 +- commons/zenoh-util/src/timer.rs | 4 +- deny.toml | 1 + examples/README.md | 4 +- examples/examples/z_alloc_shm.rs | 8 +- examples/examples/z_sub_thr.rs | 8 +- io/zenoh-links/zenoh-link-quic/Cargo.toml | 12 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 1 - 
io/zenoh-links/zenoh-link-quic/src/unicast.rs | 54 +- io/zenoh-links/zenoh-link-quic/src/utils.rs | 158 ++--- io/zenoh-links/zenoh-link-quic/src/verify.rs | 42 -- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 9 + io/zenoh-links/zenoh-link-tls/src/unicast.rs | 9 + io/zenoh-links/zenoh-link-tls/src/utils.rs | 20 + io/zenoh-links/zenoh-link-udp/src/lib.rs | 2 +- .../zenoh-link-udp/src/multicast.rs | 2 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 4 + .../zenoh-link-unixpipe/src/unix/unicast.rs | 20 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 2 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 11 +- io/zenoh-transport/src/common/batch.rs | 2 +- io/zenoh-transport/src/common/pipeline.rs | 85 ++- io/zenoh-transport/src/common/seq_num.rs | 4 +- io/zenoh-transport/src/manager.rs | 2 +- io/zenoh-transport/src/multicast/link.rs | 4 +- io/zenoh-transport/src/multicast/rx.rs | 12 +- .../src/unicast/establishment/accept.rs | 2 +- .../src/unicast/lowlatency/transport.rs | 6 +- io/zenoh-transport/src/unicast/manager.rs | 23 +- .../src/unicast/transport_unicast_inner.rs | 10 +- .../src/unicast/universal/link.rs | 4 +- .../src/unicast/universal/transport.rs | 10 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/replica/storage.rs | 2 +- plugins/zenoh-plugin-trait/src/lib.rs | 4 +- plugins/zenoh-plugin-trait/src/vtable.rs | 2 +- zenoh-ext/examples/examples/README.md | 2 +- zenoh-ext/examples/examples/z_pub_cache.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 8 +- zenoh-ext/src/subscriber_ext.rs | 12 +- zenoh/src/api/builders/publisher.rs | 2 +- zenoh/src/api/bytes.rs | 2 +- zenoh/src/api/encoding.rs | 8 +- zenoh/src/api/info.rs | 2 +- zenoh/src/api/key_expr.rs | 6 +- zenoh/src/api/plugins.rs | 6 +- zenoh/src/api/publisher.rs | 2 +- 
zenoh/src/api/sample.rs | 2 +- zenoh/src/api/selector.rs | 2 +- zenoh/src/api/session.rs | 26 +- zenoh/src/lib.rs | 2 +- zenoh/src/net/primitives/demux.rs | 24 +- zenoh/src/net/routing/dispatcher/face.rs | 55 +- zenoh/src/net/routing/dispatcher/interests.rs | 50 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 21 +- zenoh/src/net/routing/dispatcher/queries.rs | 24 +- zenoh/src/net/routing/dispatcher/resource.rs | 44 +- zenoh/src/net/routing/dispatcher/tables.rs | 11 +- zenoh/src/net/routing/dispatcher/token.rs | 21 +- zenoh/src/net/routing/hat/client/interests.rs | 32 +- zenoh/src/net/routing/hat/client/mod.rs | 31 +- zenoh/src/net/routing/hat/client/pubsub.rs | 138 ++-- zenoh/src/net/routing/hat/client/queries.rs | 123 ++-- zenoh/src/net/routing/hat/client/token.rs | 246 ++++--- .../routing/hat/linkstate_peer/interests.rs | 23 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 39 +- .../net/routing/hat/linkstate_peer/network.rs | 27 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 373 +++++----- .../net/routing/hat/linkstate_peer/queries.rs | 332 +++++---- .../net/routing/hat/linkstate_peer/token.rs | 342 +++++---- zenoh/src/net/routing/hat/mod.rs | 26 +- .../src/net/routing/hat/p2p_peer/interests.rs | 17 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 40 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 317 +++++---- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 286 ++++---- zenoh/src/net/routing/hat/p2p_peer/token.rs | 356 ++++++---- zenoh/src/net/routing/hat/router/interests.rs | 23 +- zenoh/src/net/routing/hat/router/mod.rs | 102 ++- zenoh/src/net/routing/hat/router/network.rs | 27 +- zenoh/src/net/routing/hat/router/pubsub.rs | 455 +++++++----- zenoh/src/net/routing/hat/router/queries.rs | 464 +++++++------ zenoh/src/net/routing/hat/router/token.rs | 505 ++++++++------ .../net/routing/interceptor/downsampling.rs | 4 +- zenoh/src/net/routing/router.rs | 22 +- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/net/runtime/mod.rs | 12 +- 
zenoh/src/net/runtime/orchestrator.rs | 8 +- zenoh/src/net/tests/tables.rs | 36 +- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 139 files changed, 3717 insertions(+), 2472 deletions(-) create mode 100644 _typos.toml delete mode 100644 io/zenoh-links/zenoh-link-quic/src/verify.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5f153a2d32..35250bdec5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -152,6 +152,16 @@ jobs: run: ci/valgrind-check/run.sh shell: bash + typos: + name: Typos Check + runs-on: ubuntu-latest + steps: + - name: Clone this repository + uses: actions/checkout@v4 + + - name: Check spelling + uses: crate-ci/typos@master + # NOTE: In GitHub repository settings, the "Require status checks to pass # before merging" branch protection rule ensures that commits are only merged # from branches where specific status checks have passed. These checks are @@ -160,7 +170,7 @@ jobs: ci: name: CI status checks runs-on: ubuntu-latest - needs: [check, test, valgrind] + needs: [check, test, valgrind, typos] if: always() steps: - name: Check whether all jobs pass diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 25553c2b0a..f8e614fa12 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,6 +27,10 @@ on: type: string description: Release number required: false + branch: + type: string + description: Release branch + required: false jobs: tag: @@ -42,6 +46,7 @@ jobs: repo: ${{ github.repository }} live-run: ${{ inputs.live-run || false }} version: ${{ inputs.version }} + branch: ${{ inputs.branch }} github-token: ${{ secrets.BOT_TOKEN_WORKFLOW }} - uses: eclipse-zenoh/ci/bump-crates@main diff --git a/Cargo.lock b/Cargo.lock index a6e0754166..e794fd289c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -529,6 +529,12 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -567,9 +573,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -667,6 +673,12 @@ dependencies = [ "libc", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cfg-if" version = "0.1.10" @@ -792,11 +804,21 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -864,9 +886,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -874,9 +896,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" @@ -1221,9 +1243,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -1232,12 +1254,13 @@ dependencies = [ [[package]] name = "fancy-regex" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b95f7c0680e4142284cf8b22c14a476e87d61b004a3a0861872b32ef7ead40a2" +checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2" dependencies = [ "bit-set", - "regex", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -1322,18 +1345,18 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "fraction" -version = "0.13.1" +version = "0.15.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3027ae1df8d41b4bed2241c8fdad4acc1e7af60c8e17743534b545e77182d678" +checksum = "0f158e3ff0a1b334408dc9fb811cd99b446986f4d8b741bb08f9df1604085ae7" dependencies = [ "lazy_static", "num", @@ -1554,18 +1577,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -1776,14 +1790,134 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "idna" -version = "0.4.0" 
+version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", + "smallvec", + "utf8_iter", ] [[package]] @@ -1793,7 +1927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown", ] [[package]] @@ -1887,6 +2021,26 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "js-sys" version = "0.3.64" @@ -1909,13 +2063,13 @@ dependencies = [ [[package]] name = "jsonschema" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a071f4f7efc9a9118dfb627a0a94ef247986e1ab8606a4c806ae2b3aa3b6978" +checksum = "ec0afd06142c9bcb03f4a8787c77897a87b6be9c4918f1946c33caa714c27578" dependencies = [ "ahash", "anyhow", - "base64 0.21.4", + "base64 0.22.1", "bytecount", "fancy-regex", "fraction", @@ -1946,11 +2100,11 @@ dependencies = [ [[package]] name = "keyed-set" -version = "0.4.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b79e110283e09081809ca488cf3a9709270c6d4d4c4a32674c39cc438366615a" +checksum = "0a3ec39d2dc17953a1540d63906a112088f79b2e46833b4ed65bc9de3904ae34" dependencies = [ - "hashbrown 0.13.2", + "hashbrown", ] [[package]] @@ -1973,9 +2127,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.153" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" @@ -2005,6 +2159,12 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lock_api" version = "0.4.10" @@ -2082,9 +2242,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoffset" @@ -2218,7 +2378,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if 1.0.0", "libc", "memoffset 0.9.0", @@ -2252,9 +2412,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" dependencies = [ 
"num-bigint", "num-complex", @@ -2300,28 +2460,27 @@ checksum = "63335b2e2c34fae2fb0aa2cecfd9f0832a1e24b3b32ecec612c3426d46dc8aaa" [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -2402,7 +2561,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -2504,11 +2663,11 @@ checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pem" -version = "2.0.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "serde", ] @@ -2523,9 +2682,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -2719,18 +2878,18 @@ dependencies = [ [[package]] name = "pnet_base" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" +checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7" dependencies = [ "no-std-net", ] [[package]] name = "pnet_datalink" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad5854abf0067ebbd3967f7d45ebc8976ff577ff0c7bd101c4973ae3c70f98fe" +checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7" dependencies = [ "ipnetwork", "libc", @@ -2741,9 +2900,9 @@ dependencies = [ [[package]] name = "pnet_sys" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "417c0becd1b573f6d544f73671070b039051e5ad819cc64aa96377b536128d00" +checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b" dependencies = [ "libc", "winapi", @@ -2840,16 +2999,16 @@ dependencies = [ [[package]] name = "quinn" -version = "0.10.2" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" dependencies = [ "bytes", "pin-project-lite 0.2.13", "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.7", + "rustls", "thiserror", "tokio", "tracing", @@ -2857,16 +3016,16 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.4" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" dependencies = [ "bytes", "rand 0.8.5", - "ring 0.16.20", + "ring", "rustc-hash", - "rustls 0.21.7", - "rustls-native-certs 0.6.3", + "rustls", + "rustls-platform-verifier", "slab", "thiserror", "tinyvec", @@ -2875,15 +3034,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.4.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +checksum = "9096629c45860fc7fb143e125eb826b5e721e10be3263160c7d60ca832cf8c46" dependencies = [ - "bytes", "libc", + "once_cell", "socket2 0.5.6", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2990,12 +3149,13 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.11.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" +checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" dependencies = [ "pem", - "ring 0.16.20", + "ring", + "rustls-pki-types", "time 0.3.28", "yasna", ] @@ -3031,14 +3191,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", - "regex-syntax 0.7.5", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -3052,13 +3212,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.4", ] [[package]] @@ -3069,9 +3229,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" @@ -3113,21 +3273,6 @@ dependencies = [ "winreg", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.6" @@ -3138,7 +3283,7 @@ dependencies = [ "getrandom 0.2.10", "libc", "spin 0.9.8", - "untrusted 0.9.0", + "untrusted", "windows-sys 0.48.0", ] @@ -3159,7 +3304,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.4", - "bitflags 2.4.2", + "bitflags 2.5.0", "serde", "serde_derive", ] @@ -3251,7 +3396,7 @@ version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno 0.3.8", "libc", "linux-raw-sys 0.4.13", @@ -3260,42 +3405,19 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" -dependencies = [ - "log", - "ring 0.16.20", - "rustls-webpki 0.101.5", - "sct", -] - -[[package]] -name = "rustls" -version = "0.22.4" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "log", - "ring 0.17.6", + "once_cell", + "ring", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.3", - "schannel", - "security-framework", -] - [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -3330,29 +3452,46 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] -name = "rustls-webpki" -version = "0.101.5" +name = "rustls-platform-verifier" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-roots", + "winapi", ] +[[package]] +name = 
"rustls-platform-verifier-android" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" + [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ - "ring 0.17.6", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] [[package]] @@ -3415,16 +3554,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -3437,22 +3566,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", + "num-bigint", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -3758,9 +3888,9 @@ dependencies = [ [[package]] 
name = "smallvec" -version = "1.11.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "snap" @@ -3849,6 +3979,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "standback" version = "0.2.17" @@ -4099,11 +4235,11 @@ dependencies = [ [[package]] name = "thread-priority" -version = "0.15.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72cb4958060ee2d9540cef68bb3871fd1e547037772c7fe7650d5d1cbec53b3" +checksum = "0d3b04d33c9633b8662b167b847c7ab521f83d1ae20f2321b65b5b925e532e36" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "cfg-if 1.0.0", "libc", "log", @@ -4210,6 +4346,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -4284,21 +4430,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.7", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" 
dependencies = [ - "rustls 0.22.4", + "rustls", "rustls-pki-types", "tokio", ] @@ -4329,9 +4465,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.21.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", @@ -4349,7 +4485,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "hashbrown 0.14.0", + "hashbrown", "pin-project-lite 0.2.13", "tokio", "tracing", @@ -4507,9 +4643,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.21.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" dependencies = [ "byteorder", "bytes", @@ -4520,7 +4656,6 @@ dependencies = [ "rand 0.8.5", "sha1 0.10.5", "thiserror", - "url", "utf-8", ] @@ -4560,27 +4695,12 @@ dependencies = [ "spin 0.9.8", ] -[[package]] -name = "unicode-bidi" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" - [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-xid" version = "0.2.4" @@ -4613,12 +4733,6 @@ version = "0.2.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -4644,9 +4758,9 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" dependencies = [ "form_urlencoded", "idna", @@ -4666,6 +4780,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.1" @@ -5142,6 +5268,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "x509-parser" version = "0.16.0" @@ -5168,6 +5306,30 @@ dependencies = [ "time 0.3.28", ] +[[package]] +name = "yoke" +version = "0.7.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", + "synstructure", +] + [[package]] name = "z-serial" version = "0.2.3" @@ -5187,8 +5349,8 @@ version = "0.11.0-dev" dependencies = [ "ahash", "async-trait", - "base64 0.21.4", - "event-listener 4.0.0", + "base64 0.22.1", + "event-listener 5.3.1", "flume", "form_urlencoded", "futures", @@ -5385,7 +5547,7 @@ version = "0.11.0-dev" dependencies = [ "ahash", "criterion", - "hashbrown 0.14.0", + "hashbrown", "keyed-set", "lazy_static", "rand 0.8.5", @@ -5421,11 +5583,11 @@ name = "zenoh-link-commons" version = "0.11.0-dev" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.22.1", "flume", "futures", - "rustls 0.22.4", - "rustls-webpki 0.102.2", + "rustls", + "rustls-webpki", "serde", "tokio", "tokio-util", @@ -5446,17 +5608,16 @@ name = "zenoh-link-quic" version = "0.11.0-dev" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.22.1", "futures", "quinn", - "rustls 0.21.7", - "rustls-native-certs 0.7.0", - "rustls-pemfile 1.0.3", + "rustls", + "rustls-pemfile 2.0.0", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "secrecy", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-util", "tracing", "webpki-roots", @@ -5515,15 +5676,15 @@ name = "zenoh-link-tls" version = "0.11.0-dev" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.22.1", "futures", - "rustls 0.22.4", + "rustls", "rustls-pemfile 2.0.0", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki", "secrecy", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls", 
"tokio-util", "tracing", "webpki-roots", @@ -5671,7 +5832,7 @@ version = "0.11.0-dev" dependencies = [ "anyhow", "async-std", - "base64 0.21.4", + "base64 0.22.1", "clap", "const_format", "flume", @@ -5796,7 +5957,7 @@ dependencies = [ name = "zenoh-sync" version = "0.11.0-dev" dependencies = [ - "event-listener 4.0.0", + "event-listener 5.3.1", "futures", "tokio", "zenoh-buffers", @@ -5932,8 +6093,51 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", + "synstructure", +] + [[package]] name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + +[[package]] +name = "zerovec" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] diff --git a/Cargo.toml b/Cargo.toml index 37df73e66b..a1820cb495 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,12 +80,13 @@ ahash = "0.8.7" anyhow = { version = "1.0.69", default-features = false } # Default features are disabled due to usage in no_std crates async-executor = "1.5.0" 
async-global-executor = "2.3.1" -async-io = "1.13.0" +async-io = "2.3.3" async-std = { version = "=1.12.0", default-features = false } # Default features are disabled due to some crates' requirements async-trait = "0.1.60" -base64 = "0.21.4" +base64 = "0.22.1" bincode = "1.3.3" clap = { version = "4.4.11", features = ["derive"] } +console-subscriber = "0.3.0" const_format = "0.2.30" crc = "3.0.1" criterion = "0.5" @@ -93,7 +94,7 @@ derive_more = "0.99.17" derive-new = "0.6.0" tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } tracing-loki = "0.2" -event-listener = "4.0.0" +event-listener = "5.3.1" flume = "0.11" form_urlencoded = "1.1.0" futures = "0.3.25" @@ -106,15 +107,15 @@ home = "0.5.4" http-types = "2.12.0" humantime = "2.1.0" json5 = "0.4.1" -jsonschema = { version = "0.17.1", default-features = false } -keyed-set = "0.4.4" +jsonschema = { version = "0.18.0", default-features = false } +keyed-set = "1.0.0" lazy_static = "1.4.0" libc = "0.2.139" libloading = "0.8" tracing = "0.1" lockfree = "0.5" lz4_flex = "0.11" -nix = { version = "0.27", features = ["fs"] } +nix = { version = "0.27.0", features = ["fs"] } num_cpus = "1.16.0" num-traits = { version = "0.2.17", default-features = false } once_cell = "1.19.0" @@ -123,20 +124,24 @@ panic-message = "0.3.0" paste = "1.0.12" petgraph = "0.6.3" phf = { version = "0.11.2", features = ["macros"] } -pnet = "0.34" -pnet_datalink = "0.34" +pnet = "0.35.0" +pnet_datalink = "0.35.0" proc-macro2 = "1.0.51" -quinn = "0.10.1" +quinn = "0.11.1" quote = "1.0.23" rand = { version = "0.8.5", default-features = false } # Default features are disabled due to usage in no_std crates rand_chacha = "0.3.1" -rcgen = "0.11" +rcgen = "0.13.1" regex = "1.7.1" ron = "0.8.1" ringbuffer-spsc = "0.1.9" rsa = "0.9" rustc_version = "0.4.0" -rustls = "0.22.2" +rustls = { version = "0.23.9", default-features = false, features = [ + "logging", + "tls12", + "ring", +] } rustls-native-certs = "0.7.0" rustls-pemfile = 
"2.0.0" rustls-webpki = "0.102.0" @@ -161,11 +166,10 @@ tide = "0.16.0" token-cell = { version = "1.4.2", default-features = false } tokio = { version = "1.35.1", default-features = false } # Default features are disabled due to some crates' requirements tokio-util = "0.7.10" -tokio-tungstenite = "0.21" -tokio-rustls = "0.25.0" +tokio-tungstenite = "0.23.1" +tokio-rustls = { version = "0.26.0", default-features = false } # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) -thread-priority = "0.15" -console-subscriber = "0.2" +thread-priority = "1.1.0" typenum = "1.16.0" uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates unwrap-infallible = "0.1.5" diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 6a4df381f1..7a14eba2bd 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -40,7 +40,7 @@ exit_on_failure: { router: false, peer: false, client: true }, /// connect establishing retry configuration retry: { - /// intial wait timeout until next connect try + /// initial wait timeout until next connect try period_init_ms: 1000, /// maximum wait timeout until next connect try period_max_ms: 4000, @@ -73,7 +73,7 @@ exit_on_failure: true, /// listen retry configuration retry: { - /// intial wait timeout until next try + /// initial wait timeout until next try period_init_ms: 1000, /// maximum wait timeout until next try period_max_ms: 4000, @@ -108,8 +108,8 @@ gossip: { /// Whether gossip scouting is enabled or not enabled: true, - /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. + /// When true, gossip scouting information are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting information are only propagated to the next hop. 
/// Activating multihop gossip implies more scouting traffic and a lower scalability. /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have /// direct connectivity with each other. @@ -230,6 +230,8 @@ /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. + /// NOTE: LowLatency transport does not support the fragmentation, so the message size should be + /// smaller than the tx batch_size. lowlatency: false, /// Enables QoS on unicast communications. qos: { @@ -274,7 +276,7 @@ /// set the actual keep_alive interval to one fourth of the lease time: i.e. send /// 4 keep_alive messages in a lease period. Changing the lease time will have the /// keep_alive messages sent more or less often. - /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity /// check which considers a link as failed when no messages are received in 3.5 times the /// target interval. keep_alive: 4, @@ -300,7 +302,7 @@ background: 4, }, /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-inserted into the queue. /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. congestion_control: { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. 
@@ -315,7 +317,7 @@ rx: { /// Receiving buffer size in bytes for each link /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. buffer_size: 65535, @@ -352,7 +354,7 @@ enabled: false, }, auth: { - /// The configuration of authentification. + /// The configuration of authentication. /// A password implies a username is required. usrpwd: { user: null, @@ -405,7 +407,7 @@ // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively // /// This is used in the 'storage_manager' which supports subplugins, each with it's own config // /// - // /// See below exapmle of plugin configuration using `__config__` property + // /// See below example of plugin configuration using `__config__` property // // /// Configure the REST API plugin // rest: { diff --git a/README.md b/README.md index b09ea73d86..af08db7260 100644 --- a/README.md +++ b/README.md @@ -62,9 +62,9 @@ Then you can start run `zenohd`. ## How to build it > [!WARNING] -> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in mantaining compatibility between the various git repositories in the Zenoh project. +> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). 
It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in maintaining compatibility between the various git repositories in the Zenoh project. -Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be succesfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: +Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: ```bash $ rustup update @@ -170,7 +170,7 @@ See other examples of Zenoh usage in [examples/](examples) * `--rest-http-port `: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values: - a port number - a string with format `:` (to bind the HTTP server to a specific interface) - - `"None"` to desactivate the REST plugin + - `"None"` to deactivate the REST plugin If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`. diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 0000000000..182770db32 --- /dev/null +++ b/_typos.toml @@ -0,0 +1,12 @@ +[files] +extend-exclude = [ + # Ignore files containing hexa. 
+ "io/zenoh-transport/tests/*.rs", + "zenoh/tests/open_time.rs", + "zenoh/tests/authentication.rs", +] + + +[default.extend-words] +mis = "mis" # mismatch +thr = "thr" # throughput diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 50eb54c923..a198c654d2 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -69,7 +69,7 @@ impl ZBuf { let mut slices = self.zslices(); match self.slices.len() { 0 => ZSlice::empty(), - // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. + // SAFETY: it's safe to use unwrap_unchecked() because we are explicitly checking the length is 1. 1 => unsafe { slices.next().unwrap_unchecked().clone() }, _ => slices .fold(Vec::new(), |mut acc, it| { diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 43a273c4ad..42babb8b88 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -126,7 +126,7 @@ impl ZSlice { } /// # Safety - /// This function does not verify wether the `start` and `end` indexes are within the buffer boundaries. + /// This function does not verify whether the `start` and `end` indexes are within the buffer boundaries. /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. pub unsafe fn new_unchecked(buf: Arc, start: usize, end: usize) -> Self { ZSlice { diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index a29f88f3d5..20c0a0a4f6 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -287,7 +287,7 @@ zint_impl!(usize); // // guarantees at this point that `x` is never `0`. Since `x` is 64bit, // // then `n` is guaranteed to have a value between 1 and 8, both inclusives. // // `into` is guaranteed to be exactly 9 bytes long. 
Therefore, copying at most -// // 8 bytes with a pointer offest of 1 is actually safe. +// // 8 bytes with a pointer offset of 1 is actually safe. // let n = 8 - (x.leading_zeros() / 8) as usize; // unsafe { // core::ptr::copy_nonoverlapping( @@ -361,7 +361,7 @@ zint_impl!(usize); // macro_rules! non_zero_array { // ($($i: expr,)*) => { -// [$(match NonZeroU8::new($i) {Some(x) => x, None => panic!("Attempted to place 0 in an array of non-zeros litteral")}),*] +// [$(match NonZeroU8::new($i) {Some(x) => x, None => panic!("Attempted to place 0 in an array of non-zeros literal")}),*] // }; // } diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index 77db48e31c..55234dcc91 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -27,7 +27,7 @@ use crate::{ #[derive(Debug, Deserialize, Serialize, Clone)] pub struct ConnectionRetryModeDependentConf { - // intial wait timeout until next try + // initial wait timeout until next try pub period_init_ms: Option>, // maximum wait timeout until next try pub period_max_ms: Option>, diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs index 709cd7c29f..b89d78d1c0 100644 --- a/commons/zenoh-config/src/include.rs +++ b/commons/zenoh-config/src/include.rs @@ -65,7 +65,7 @@ pub(crate) fn recursive_include

( where P: AsRef, { - // if include property is present, read the file and remove properites found in file from values + // if include property is present, read the file and remove properties found in file from values let include_object = if let Some(include_path) = values.get(include_property_name) { let Some(include_path) = include_path.as_str() else { bail!( diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 25a049fb68..b7530e91a6 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -160,7 +160,7 @@ pub trait ConfigValidator: Send + Sync { } } -// Necessary to allow to set default emplty weak referece value to plugin.validator field +// Necessary to allow to set default emplty weak reference value to plugin.validator field // because empty weak value is not allowed for Arc impl ConfigValidator for () {} @@ -274,8 +274,8 @@ validated_struct::validator! { GossipConf { /// Whether gossip scouting is enabled or not. enabled: Option, - /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. + /// When true, gossip scouting information are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting information are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have /// direct connectivity with each other. @@ -382,7 +382,7 @@ validated_struct::validator! 
{ sequence_number_resolution: Bits where (sequence_number_resolution_validator), /// Link lease duration in milliseconds (default: 10000) lease: u64, - /// Number fo keep-alive messages in a link lease duration (default: 4) + /// Number of keep-alive messages in a link lease duration (default: 4) keep_alive: usize, /// Zenoh's MTU equivalent (default: 2^16-1) batch_size: BatchSize, @@ -403,7 +403,7 @@ validated_struct::validator! { background: usize, } where (queue_size_validator), /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-inserted into the queue. /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. pub congestion_control: CongestionControlConf { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. @@ -419,7 +419,7 @@ validated_struct::validator! { pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. buffer_size: usize, @@ -462,7 +462,7 @@ validated_struct::validator! { }, pub auth: #[derive(Default)] AuthConf { - /// The configuration of authentification. + /// The configuration of authentication. /// A password implies a username is required. 
pub usrpwd: #[derive(Default)] UsrPwdConf { diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 215d4e2d9e..6d1774bcd8 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -111,7 +111,7 @@ impl keyexpr { /// Joins both sides, inserting a `/` in between them. /// - /// This should be your prefered method when concatenating path segments. + /// This should be your preferred method when concatenating path segments. /// /// This is notably useful for workspaces: /// ```rust @@ -146,7 +146,7 @@ impl keyexpr { /// /// NOTE: this operation can typically be used in a backend implementation, at creation of a Storage to get the keys prefix, /// and then in `zenoh_backend_traits::Storage::on_sample()` this prefix has to be stripped from all received - /// `Sample::key_expr` to retrieve the corrsponding key. + /// `Sample::key_expr` to retrieve the corresponding key. /// /// # Examples: /// ``` @@ -183,12 +183,12 @@ impl keyexpr { } /// Remove the specified `prefix` from `self`. - /// The result is a list of `keyexpr`, since there might be several ways for the prefix to match the begining of the `self` key expression. + /// The result is a list of `keyexpr`, since there might be several ways for the prefix to match the beginning of the `self` key expression. /// For instance, if `self` is `"a/**/c/*" and `prefix` is `a/b/c` then: /// - the `prefix` matches `"a/**/c"` leading to a result of `"*"` when stripped from `self` /// - the `prefix` matches `"a/**"` leading to a result of `"**/c/*"` when stripped from `self` /// So the result is `["*", "**/c/*"]`. - /// If `prefix` cannot match the begining of `self`, an empty list is reuturned. + /// If `prefix` cannot match the beginning of `self`, an empty list is reuturned. /// /// See below more examples. 
/// @@ -601,7 +601,7 @@ enum KeyExprConstructionError { LoneDollarStar = -1, SingleStarAfterDoubleStar = -2, DoubleStarAfterDoubleStar = -3, - EmpyChunk = -4, + EmptyChunk = -4, StarsInChunk = -5, DollarAfterDollarOrStar = -6, ContainsSharpOrQMark = -7, @@ -615,7 +615,7 @@ impl<'a> TryFrom<&'a str> for &'a keyexpr { let mut in_big_wild = false; for chunk in value.split('/') { if chunk.is_empty() { - bail!((KeyExprConstructionError::EmpyChunk) "Invalid Key Expr `{}`: empty chunks are forbidden, as well as leading and trailing slashes", value) + bail!((KeyExprConstructionError::EmptyChunk) "Invalid Key Expr `{}`: empty chunks are forbidden, as well as leading and trailing slashes", value) } if chunk == "$*" { bail!((KeyExprConstructionError::LoneDollarStar) diff --git a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs index 3a03d8a515..bf5536ec63 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs @@ -23,7 +23,7 @@ //! ## The format syntax //! KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. //! These chunks must follow the one of the following syntaxes: `${id:pattern}`, `${id:pattern#default}`, `$#{id:pattern}#`, or `$#{id:pattern#default}#`, where: -//! - `id` is the chunk identifer: it cannot contain the `:` character, and is used to name the chunk in accessors. +//! - `id` is the chunk identifier: it cannot contain the `:` character, and is used to name the chunk in accessors. //! - `pattern` must be a valid KE (and therefore cannot contain `#`) and defines the range of values that the chunk may adopt. //! - `default` (optional) is used as the chunk value when formatting if the builder wasn't supplied with a value for `id`. //! 
@@ -73,7 +73,7 @@ use support::{IterativeConstructor, Spec}; /// ## The format syntax /// KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. /// These chunks must follow the one of the following syntaxes: `${id:pattern}`, `${id:pattern#default}`, `$#{id:pattern}#`, or `$#{id:pattern#default}#`, where: -/// - `id` is the chunk identifer: it cannot contain the `:` character, and is used to name the chunk in accessors. +/// - `id` is the chunk identifier: it cannot contain the `:` character, and is used to name the chunk in accessors. /// - `pattern` must be a valid KE (and therefore cannot contain `#`) and defines the range of values that the chunk may adopt. /// - `default` (optional) is used as the chunk value when formatting if the builder wasn't supplied with a value for `id`. /// diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs index 77388a55c9..aea554a2f1 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs @@ -110,7 +110,7 @@ fn it_intersect(mut it1: &[u8], mut it2: &[u8]) -> bool { } (it1.is_empty() || it1 == b"**") && (it2.is_empty() || it2 == b"**") } -/// Retruns `true` if the given key expressions intersect. +/// Returns `true` if the given key expressions intersect. /// /// I.e. if it exists a resource key (with no wildcards) that matches /// both given key expressions. 
diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index 5e2deb206c..35197a26da 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -89,10 +89,10 @@ impl< /// # Type inference papercut /// Despite some of `KeArcTree`'s generic parameters having default values, those are only taken into /// account by the compiler when a type is named with some parameters omitted, and not when a type is - /// infered with the same parameters unconstrained. + /// inferred with the same parameters unconstrained. /// /// The simplest way to resolve this is to eventually assign to tree part of the return value - /// to a variable or field whose type is named `KeArcTree<_>` (the `Weight` parameter can generally be infered). + /// to a variable or field whose type is named `KeArcTree<_>` (the `Weight` parameter can generally be inferred). pub fn new() -> Result<(Self, DefaultToken), ::ConstructionError> { let token = DefaultToken::new()?; Ok((Self::with_token(&token), token)) diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs index a6b1847697..f61e509ead 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs @@ -53,7 +53,8 @@ impl + AsNodeMut> IChildren for KeyedSet Option<&mut T> { - self.get_mut_unguarded(&chunk) + // Unicity is guaranteed by &mut self + unsafe { self.get_mut_unguarded(&chunk) } } fn remove(&mut self, chunk: &keyexpr) -> Option { self.remove(&chunk) diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index e2833a912f..5d7991289e 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -42,7 +42,7 @@ //! KeTrees were designed to maximize code reuse. 
As such, their core properties are reflected through the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. //! //! KeTrees are made up of node, where nodes may or may not have a value (called `weight`) associated with them. To access these weighs, as well as other -//! properties of a node, you can go throught the [`IKeyExprTreeNode`] and [`IKeyExprTreeNodeMut`] traits. +//! properties of a node, you can go through the [`IKeyExprTreeNode`] and [`IKeyExprTreeNodeMut`] traits. //! //! # Iterators //! KeTrees provide iterators for the following operations: diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index 69fe6efde3..e880dae9c0 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -195,7 +195,7 @@ pub trait IKeyExprTreeMut<'a, Weight>: IKeyExprTree<'a, Weight> { self.prune_where(|node| node.weight().is_none()) } } -/// The basic operations of a KeTree when a Token is necessary to acess data. +/// The basic operations of a KeTree when a Token is necessary to access data. pub trait ITokenKeyExprTree<'a, Weight, Token> { /// An immutable guard to a node of the tree. 
type Node: IKeyExprTreeNode; diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index c9e327364e..c1c58d725e 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -492,7 +492,7 @@ mod zenoh_runtime_derive; use syn::DeriveInput; use zenoh_runtime_derive::{derive_generic_runtime_param, derive_register_param}; -/// Make the underlying struct `Param` be generic over any `T` satifying a generated `trait DefaultParam { fn param() -> Param; }` +/// Make the underlying struct `Param` be generic over any `T` satisfying a generated `trait DefaultParam { fn param() -> Param; }` /// ```rust,ignore /// #[derive(GenericRuntimeParam)] /// struct Param { diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 20bcdd9aef..e44f2f6284 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -/// Module provides a set of utility functions whic allows to manipulate &str` which follows the format `a=b;c=d|e;f=g`. +/// Module provides a set of utility functions which allows to manipulate &str` which follows the format `a=b;c=d|e;f=g`. /// and structure `Parameters` which provides `HashMap<&str, &str>`-like view over a string of such format. /// /// `;` is the separator between the key-value `(&str, &str)` elements. @@ -215,7 +215,7 @@ pub fn rand(into: &mut String) { /// A map of key/value (String,String) parameters. /// It can be parsed from a String, using `;` or `` as separator between each parameters -/// and `=` as separator between a key and its value. Keys and values are trimed. +/// and `=` as separator between a key and its value. Keys and values are trimmed. 
/// /// Example: /// ``` diff --git a/commons/zenoh-protocol/src/core/resolution.rs b/commons/zenoh-protocol/src/core/resolution.rs index fb16a5c713..5756fd2a53 100644 --- a/commons/zenoh-protocol/src/core/resolution.rs +++ b/commons/zenoh-protocol/src/core/resolution.rs @@ -113,7 +113,7 @@ impl fmt::Display for Bits { } #[repr(u8)] -// The value indicates the bit offest +// The value indicates the bit offset #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Field { FrameSN = 0, diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 8d26f52ed9..fbfefa7c09 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -73,7 +73,7 @@ pub const VERSION: u8 = 0x09; // # Array field // // An array contains a fixed number of elements whose number is known a priori or indicated by -// another field. Each element can be either a single byte field or a variable legnth field. +// another field. Each element can be either a single byte field or a variable length field. // // ```text // 7 6 5 4 3 2 1 0 diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index b36080be28..29ed7e4c29 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ b/commons/zenoh-protocol/src/network/interest.rs @@ -27,7 +27,7 @@ pub mod flag { /// The INTEREST message is sent to request the transmission of current and optionally future /// declarations of a given kind matching a target keyexpr. E.g., an interest could be -/// sent to request the transmisison of all current subscriptions matching `a/*`. +/// sent to request the transmission of all current subscriptions matching `a/*`. /// /// The behaviour of a INTEREST depends on the INTEREST MODE. 
/// diff --git a/commons/zenoh-protocol/src/transport/close.rs b/commons/zenoh-protocol/src/transport/close.rs index 4e760400b7..b93fe6d6b6 100644 --- a/commons/zenoh-protocol/src/transport/close.rs +++ b/commons/zenoh-protocol/src/transport/close.rs @@ -16,7 +16,7 @@ /// /// The [`Close`] message is sent in any of the following two cases: /// 1) in response to an INIT or OPEN message which are not accepted; -/// 2) at any time to arbitrarly close the transport with the corresponding zenoh node. +/// 2) at any time to arbitrarily close the transport with the corresponding zenoh node. /// /// The [`Close`] message flow is the following: /// diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index 0a1df1fdf5..d60df23227 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -19,7 +19,7 @@ pub use crate::transport::TransportSn; /// # Fragment message /// /// The [`Fragment`] message is used to transmit on the wire large [`crate::zenoh::ZenohMessage`] -/// that require fragmentation because they are larger thatn the maximum batch size +/// that require fragmentation because they are larger than the maximum batch size /// (i.e. 2^16-1) and/or the link MTU. /// /// The [`Fragment`] message flow is the following: diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 02a4ead48f..480bebe08e 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -21,7 +21,7 @@ use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; /// [`crate::net::protocol::message::ZenohMessage`]. I.e., the total length of the /// serialized [`crate::net::protocol::message::ZenohMessage`] (s) MUST be smaller /// than the maximum batch size (i.e. 2^16-1) and the link MTU. 
-/// The [`Frame`] message is used as means to aggreate multiple +/// The [`Frame`] message is used as means to aggregate multiple /// [`crate::net::protocol::message::ZenohMessage`] in a single atomic message that /// goes on the wire. By doing so, many small messages can be batched together and /// share common information like the sequence number. diff --git a/commons/zenoh-protocol/src/transport/keepalive.rs b/commons/zenoh-protocol/src/transport/keepalive.rs index 927b0cd46b..cc9ccfad99 100644 --- a/commons/zenoh-protocol/src/transport/keepalive.rs +++ b/commons/zenoh-protocol/src/transport/keepalive.rs @@ -49,7 +49,7 @@ /// /// NOTE: In order to consider eventual packet loss, transmission latency and jitter, the time /// interval between two subsequent [`KeepAlive`] messages SHOULD be set to one fourth of -/// the lease time. This is in-line with the ITU-T G.8013/Y.1731 specification on continous +/// the lease time. This is in-line with the ITU-T G.8013/Y.1731 specification on continuous /// connectivity check which considers a link as failed when no messages are received in /// 3.5 times the target keep alive interval. /// diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index 8c2e1429ec..8042eeb634 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -19,7 +19,7 @@ use crate::transport::TransportSn; /// # Open message /// -/// After having succesfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, +/// After having successfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, /// the OPEN message is sent on a link to finalize the initialization of the link and /// associated transport with a zenoh node. 
/// For convenience, we call [`OpenSyn`] and [`OpenAck`] an OPEN message with the A flag diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index b8808d96d7..ab02885eac 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -63,7 +63,7 @@ pub mod ext { pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data + /// Used to carry additional information about the shared-memory layout of data #[cfg(feature = "shared-memory")] pub type Shm = zextunit!(0x2, true); #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index 91a0a8f50b..ac45b1cc1b 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -68,7 +68,7 @@ pub mod ext { pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data + /// Used to carry additional information about the shared-memory layout of data #[cfg(feature = "shared-memory")] pub type Shm = zextunit!(0x2, true); #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs index 0487981e5c..933940cac1 100644 --- a/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs +++ b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs @@ -20,7 +20,7 @@ use super::{ }; /// The provider backend trait -/// Implemet this interface to create a Zenoh-compatible shared memory provider +/// Implement this interface to create a Zenoh-compatible shared memory provider #[zenoh_macros::unstable_doc] pub trait ShmProviderBackend { /// Allocate the chunk of desired size. 
diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index 6e8ced7fc8..603c4a481a 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -38,7 +38,7 @@ impl From for ZAllocError { } } -/// alignemnt in powers of 2: 0 == 1-byte alignment, 1 == 2byte, 2 == 4byte, 3 == 8byte etc +/// alignment in powers of 2: 0 == 1-byte alignment, 1 == 2byte, 2 == 4byte, 3 == 8byte etc #[zenoh_macros::unstable_doc] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct AllocAlignment { @@ -93,7 +93,7 @@ impl AllocAlignment { } } -/// Memory layout representation: alignemnt and size aligned for this alignment +/// Memory layout representation: alignment and size aligned for this alignment #[zenoh_macros::unstable_doc] #[derive(Debug)] pub struct MemoryLayout { diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index eec962a7e4..19f8a1c76f 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -50,7 +50,7 @@ pub mod posix_shm; pub mod reader; pub mod watchdog; -/// Informations about a [`ShmBufInner`]. +/// Information about a [`ShmBufInner`]. /// /// This that can be serialized and can be used to retrieve the [`ShmBufInner`] in a remote process. 
#[derive(Clone, Debug, PartialEq, Eq)] @@ -104,7 +104,7 @@ pub struct ShmBufInner { impl PartialEq for ShmBufInner { fn eq(&self, other: &Self) -> bool { // currently there is no API to resize an SHM buffer, but it is intended in the future, - // so I add size comparsion here to avoid future bugs :) + // so I add size comparison here to avoid future bugs :) self.buf.load(Ordering::Relaxed) == other.buf.load(Ordering::Relaxed) && self.info.data_len == other.info.data_len } diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs index 08a6ee18d3..a41f601cfe 100644 --- a/commons/zenoh-shm/src/watchdog/periodic_task.rs +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -63,10 +63,10 @@ impl PeriodicTask { #[cfg(unix)] { tracing::warn!("{:?}: error setting realtime FIFO scheduling policy for thread: {:?}, will run with the default one...", std::thread::current().name(), e); - for priotity in (ThreadPriorityValue::MIN..ThreadPriorityValue::MAX).rev() { - if let Ok(p) = priotity.try_into() { + for priority in (ThreadPriorityValue::MIN..ThreadPriorityValue::MAX).rev() { + if let Ok(p) = priority.try_into() { if set_current_thread_priority(ThreadPriority::Crossplatform(p)).is_ok() { - tracing::warn!("{:?}: will use priority {}", std::thread::current().name(), priotity); + tracing::warn!("{:?}: will use priority {}", std::thread::current().name(), priority); break; } } diff --git a/commons/zenoh-shm/tests/common/mod.rs b/commons/zenoh-shm/tests/common/mod.rs index a97773f686..23f55d8c2a 100644 --- a/commons/zenoh-shm/tests/common/mod.rs +++ b/commons/zenoh-shm/tests/common/mod.rs @@ -80,7 +80,7 @@ impl Drop for CpuLoad { } impl CpuLoad { - pub fn exessive() -> Self { + pub fn excessive() -> Self { Self::new(1000) } diff --git a/commons/zenoh-shm/tests/header.rs b/commons/zenoh-shm/tests/header.rs index a734abf108..1feb06dba9 100644 --- a/commons/zenoh-shm/tests/header.rs +++ b/commons/zenoh-shm/tests/header.rs @@ 
-70,7 +70,7 @@ fn header_link_failure_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Se // Some comments on this behaviour... // Even though the allocated_header is dropped, it's SHM segment still exists in GLOBAL_HEADER_STORAGE, // so there is no way to detect that header is "deallocated" and the code below succeeds. The invalidation - // funcionality is implemented on higher level by means of generation mechanism and protects from both header + // functionality is implemented on higher level by means of generation mechanism and protects from both header // and watchdog link-to-deallocated issues. This generation mechanism depends on the behaviour below, so // everything is fair :) let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; diff --git a/commons/zenoh-shm/tests/periodic_task.rs b/commons/zenoh-shm/tests/periodic_task.rs index dcfd560d7d..701fe742ca 100644 --- a/commons/zenoh-shm/tests/periodic_task.rs +++ b/commons/zenoh-shm/tests/periodic_task.rs @@ -152,21 +152,21 @@ fn periodic_task_optimal_high_load_intensive() { #[test] #[ignore] -fn periodic_task_exessive_load_lightweight() { - let _load = CpuLoad::exessive(); +fn periodic_task_excessive_load_lightweight() { + let _load = CpuLoad::excessive(); check_task(|| {}); } #[test] #[ignore] -fn periodic_task_exessive_load_blocking() { - let _load = CpuLoad::exessive(); +fn periodic_task_excessive_load_blocking() { + let _load = CpuLoad::excessive(); check_task(blocking_payload(TEST_TASK)); } #[test] #[ignore] -fn periodic_task_exessive_load_intensive() { - let _load = CpuLoad::exessive(); +fn periodic_task_excessive_load_intensive() { + let _load = CpuLoad::excessive(); check_task(intensive_payload(TEST_TASK)); } diff --git a/commons/zenoh-shm/tests/watchdog.rs b/commons/zenoh-shm/tests/watchdog.rs index fe1ccd2ab2..7f55e4a92d 100644 --- a/commons/zenoh-shm/tests/watchdog.rs +++ b/commons/zenoh-shm/tests/watchdog.rs @@ -306,6 +306,6 @@ fn watchdog_validated_high_load() { #[test] #[ignore] 
fn watchdog_validated_overloaded_system() { - let _load = CpuLoad::exessive(); + let _load = CpuLoad::excessive(); execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); } diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index 99ba6d4ca2..f824976b87 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -14,7 +14,7 @@ use std::{pin::Pin, sync::MutexGuard}; use event_listener::{Event, EventListener}; -use tokio::sync::MutexGuard as AysncMutexGuard; +use tokio::sync::MutexGuard as AsyncMutexGuard; pub type ConditionWaiter = Pin>; /// This is a Condition Variable similar to that provided by POSIX. @@ -45,7 +45,7 @@ impl Condition { /// Waits for the condition to be notified #[inline] - pub async fn wait(&self, guard: AysncMutexGuard<'_, T>) { + pub async fn wait(&self, guard: AsyncMutexGuard<'_, T>) { let listener = self.event.listen(); drop(guard); listener.await; @@ -55,7 +55,7 @@ impl Condition { pub fn waiter(&self, guard: MutexGuard<'_, T>) -> ConditionWaiter { let listener = self.event.listen(); drop(guard); - listener + Box::pin(listener) } /// Notifies one pending listener diff --git a/commons/zenoh-util/src/lib.rs b/commons/zenoh-util/src/lib.rs index 7b5bb2e592..745e790711 100644 --- a/commons/zenoh-util/src/lib.rs +++ b/commons/zenoh-util/src/lib.rs @@ -30,7 +30,7 @@ pub use timer::*; pub mod log; pub use log::*; -/// The "ZENOH_HOME" environement variable name +/// The "ZENOH_HOME" environment variable name pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; diff --git a/commons/zenoh-util/src/lib_loader.rs b/commons/zenoh-util/src/lib_loader.rs index d6b254eb35..a2fb98da23 100644 --- a/commons/zenoh-util/src/lib_loader.rs +++ b/commons/zenoh-util/src/lib_loader.rs @@ -32,7 +32,7 @@ zconfigurable! 
{ pub static ref LIB_DEFAULT_SEARCH_PATHS: String = ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib".to_string(); } -/// LibLoader allows search for librairies and to load them. +/// LibLoader allows search for libraries and to load them. #[derive(Clone, Debug)] pub struct LibLoader { search_paths: Vec, @@ -145,7 +145,7 @@ impl LibLoader { bail!("Library file '{}' not found", filename) } - /// Search and load all librairies with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. + /// Search and load all libraries with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. /// The result is a list of tuple with: /// * the [Library] /// * its full path diff --git a/commons/zenoh-util/src/net/mod.rs b/commons/zenoh-util/src/net/mod.rs index 65b665d31b..65577ac61d 100644 --- a/commons/zenoh-util/src/net/mod.rs +++ b/commons/zenoh-util/src/net/mod.rs @@ -25,7 +25,7 @@ zconfigurable! { } #[cfg(windows)] -unsafe fn get_adapters_adresses(af_spec: i32) -> ZResult> { +unsafe fn get_adapters_addresses(af_spec: i32) -> ZResult> { use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; let mut ret; @@ -83,7 +83,7 @@ pub fn get_interface(name: &str) -> ZResult> { use crate::ffi; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); while let Some(iface) = next_iface { @@ -168,7 +168,7 @@ pub fn get_local_addresses(interface: Option<&str>) -> ZResult> { use crate::ffi; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; let mut result = vec![]; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -249,7 +249,7 @@ pub fn get_unicast_addresses_of_interface(name: &str) -> ZResult> { use crate::ffi; - let buffer = 
get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut addrs = vec![]; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -289,7 +289,7 @@ pub fn get_index_of_interface(addr: IpAddr) -> ZResult { use crate::ffi; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); while let Some(iface) = next_iface { @@ -333,7 +333,7 @@ pub fn get_interface_names_by_addr(addr: IpAddr) -> ZResult> { use crate::ffi; - let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; if addr.is_unspecified() { let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); diff --git a/commons/zenoh-util/src/time_range.rs b/commons/zenoh-util/src/time_range.rs index 51bff157ba..ad91f2bd92 100644 --- a/commons/zenoh-util/src/time_range.rs +++ b/commons/zenoh-util/src/time_range.rs @@ -42,7 +42,7 @@ const W_TO_SECS: f64 = D_TO_SECS * 7.0; /// - the "offset" syntax, which is written `now()`, and allows to specify a target instant as /// an offset applied to an instant of evaluation. These offset are resolved at the evaluation site. /// -/// In range syntax, omiting `` and/or `` implies that the range is unbounded in that direction. +/// In range syntax, omitting `` and/or `` implies that the range is unbounded in that direction. /// /// Exclusive bounds are represented by their respective delimiters pointing towards the exterior. /// Interior bounds are represented by the opposite. 
@@ -296,7 +296,7 @@ impl TimeExpr { }), } } - /// Substracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subsctracting the duration is not possible + /// Subtracts `duration` from `self`, returning `None` if `self` is a `Fixed(SystemTime)` and subtracting the duration is not possible /// because the result would be outside the bounds of the underlying data structure (see [`SystemTime::checked_sub`]). /// Otherwise returns `Some(time_expr)`. pub fn checked_sub(&self, duration: f64) -> Option { diff --git a/commons/zenoh-util/src/timer.rs b/commons/zenoh-util/src/timer.rs index d18b9192a4..7fd059b0cf 100644 --- a/commons/zenoh-util/src/timer.rs +++ b/commons/zenoh-util/src/timer.rs @@ -89,8 +89,8 @@ impl Eq for TimedEvent {} impl Ord for TimedEvent { fn cmp(&self, other: &Self) -> ComparisonOrdering { // The usual cmp is defined as: self.when.cmp(&other.when) - // This would make the events odered from largets to the smallest in the heap. - // However, we want the events to be ordered from the smallets to the largest. + // This would make the events ordered from largest to the smallest in the heap. + // However, we want the events to be ordered from the smallest to the largest. // As a consequence of this, we swap the comparison terms, converting the heap // from a max-heap into a min-heap. other.when.cmp(&self.when) diff --git a/deny.toml b/deny.toml index 1a4a14f763..02c6caeb32 100644 --- a/deny.toml +++ b/deny.toml @@ -7,6 +7,7 @@ allow = [ "EPL-2.0", "ISC", "Unicode-DFS-2016", + "Unicode-3.0", "Zlib", "BSD-2-Clause", "BSD-3-Clause", diff --git a/examples/README.md b/examples/README.md index 1ecda78cc4..7776561ef8 100644 --- a/examples/README.md +++ b/examples/README.md @@ -213,7 +213,7 @@ Declares a liveliness token on a given key expression (`group1/zenoh-rs` by default). 
This token will be seen alive byt the `z_get_liveliness` and `z_sub_liveliness` until - user explicitely drops the token by pressing `'d'` or implicitely dropped by terminating + user explicitly drops the token by pressing `'d'` or implicitly dropped by terminating or killing the `z_liveliness` example. Typical usage: @@ -245,7 +245,7 @@ liveliness tokens being dropped) that match a given key expression (`group1/**` by default). Those tokens could be declared by the `z_liveliness` example. - Note: the `z_sub_liveliness` example will not receive informations about + Note: the `z_sub_liveliness` example will not receive information about matching liveliness tokens that were alive before it's start. Typical usage: diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 1beabaebd8..eceb74f35b 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -44,14 +44,14 @@ async fn run() -> ZResult<()> { // There are two API-defined ways of making shm buffer allocations: direct and through the layout... // Direct allocation - // The direct allocation calcualtes all layouting checks on each allocation. It is good for making + // The direct allocation calculates all layouting checks on each allocation. It is good for making // uniquely-layouted allocations. For making series of similar allocations, please refer to layout // allocation API which is shown later in this example... 
let _direct_allocation = { // OPTION: Simple allocation let simple = provider.alloc(512).wait().unwrap(); - // OPTION: Allocation with custom alignemnt and alloc policy customization + // OPTION: Allocation with custom alignment and alloc policy customization let _comprehensive = provider .alloc(512) .with_alignment(AllocAlignment::new(2)) @@ -60,7 +60,7 @@ async fn run() -> ZResult<()> { .wait() .unwrap(); - // OPTION: Allocation with custom alignemnt and async alloc policy + // OPTION: Allocation with custom alignment and async alloc policy let _async = provider .alloc(512) .with_alignment(AllocAlignment::new(2)) @@ -92,7 +92,7 @@ async fn run() -> ZResult<()> { // Allocate ShmBufInner // Policy is a generics-based API to describe necessary allocation behaviour - // that will be higly optimized at compile-time. + // that will be highly optimized at compile-time. // Policy resolvable can be sync and async. // The basic policies are: // -JustAlloc (sync) diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 0e9f53f36b..18514b3ba8 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -51,8 +51,8 @@ impl Stats { } fn print_round(&self) { let elapsed = self.round_start.elapsed().as_secs_f64(); - let throughtput = (self.round_size as f64) / elapsed; - println!("{throughtput} msg/s"); + let throughput = (self.round_size as f64) / elapsed; + println!("{throughput} msg/s"); } } impl Drop for Stats { @@ -62,8 +62,8 @@ impl Drop for Stats { }; let elapsed = global_start.elapsed().as_secs_f64(); let total = self.round_size * self.finished_rounds + self.round_count; - let throughtput = total as f64 / elapsed; - println!("Received {total} messages over {elapsed:.2}s: {throughtput}msg/s"); + let throughput = total as f64 / elapsed; + println!("Received {total} messages over {elapsed:.2}s: {throughput}msg/s"); } } diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 
265989b293..1af2a253b8 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -29,7 +29,8 @@ async-trait = { workspace = true } base64 = { workspace = true } futures = { workspace = true } quinn = { workspace = true } -rustls-native-certs = { workspace = true } +rustls = { workspace = true } +rustls-pemfile = { workspace = true } rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } secrecy = { workspace = true } @@ -40,10 +41,12 @@ tokio = { workspace = true, features = [ "sync", "time", ] } +tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } -zenoh-collections = { workspace = true } tracing = { workspace = true } webpki-roots = { workspace = true } +x509-parser = { workspace = true } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } @@ -52,8 +55,3 @@ zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } -# Lock due to quinn not supporting rustls 0.22 yet -rustls = { version = "0.21", features = ["dangerous_configuration", "quic"] } -rustls-pemfile = { version = "1" } -tokio-rustls = "0.24.1" -x509-parser = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index a7303a9622..cde9c589a3 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -25,7 +25,6 @@ use zenoh_result::ZResult; mod unicast; mod utils; -mod verify; pub use unicast::*; pub use utils::TlsConfigurator as QuicConfigurator; diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 812b3ad972..2e0d9e0a19 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -20,6 +20,7 @@ 
use std::{ }; use async_trait::async_trait; +use quinn::crypto::rustls::{QuicClientConfig, QuicServerConfig}; use tokio::sync::Mutex as AsyncMutex; use tokio_util::sync::CancellationToken; use x509_parser::prelude::*; @@ -35,7 +36,6 @@ use zenoh_protocol::{ use zenoh_result::{bail, zerror, ZResult}; use crate::{ - config::*, utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, }; @@ -78,7 +78,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { tracing::trace!("Closing QUIC link: {}", self); // Flush the QUIC stream let mut guard = zasynclock!(self.send); - if let Err(e) = guard.finish().await { + if let Err(e) = guard.finish() { tracing::trace!("Error closing QUIC stream {}: {}", self, e); } self.connection.close(quinn::VarInt::from_u32(0), &[0]); @@ -221,15 +221,6 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { let addr = get_quic_addr(&epaddr).await?; - let server_name_verification: bool = epconf - .get(TLS_SERVER_NAME_VERIFICATION) - .unwrap_or(TLS_SERVER_NAME_VERIFICATION_DEFAULT) - .parse()?; - - if !server_name_verification { - tracing::warn!("Skipping name verification of servers"); - } - // Initialize the QUIC connection let mut client_crypto = TlsClientConfig::new(&epconf) .await @@ -245,9 +236,12 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { }; let mut quic_endpoint = quinn::Endpoint::client(SocketAddr::new(ip_addr, 0)) .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new( - client_crypto.client_config, - ))); + + let quic_config: QuicClientConfig = client_crypto + .client_config + .try_into() + .map_err(|e| zerror!("Can not create a new QUIC link bound to {host}: {e}"))?; + quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new(quic_config))); let src_addr = quic_endpoint .local_addr() @@ -294,8 +288,22 @@ impl 
LinkManagerUnicastTrait for LinkManagerUnicastQuic { .map_err(|e| zerror!("Cannot create a new QUIC listener on {addr}: {e}"))?; server_crypto.server_config.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); - let mut server_config = - quinn::ServerConfig::with_crypto(Arc::new(server_crypto.server_config)); + + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + + let quic_config: QuicServerConfig = server_crypto + .server_config + .try_into() + .map_err(|e| zerror!("Can not create a new QUIC listener on {addr}: {e}"))?; + let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(quic_config)); // We do not accept unidireactional streams. Arc::get_mut(&mut server_config.transport) @@ -397,8 +405,16 @@ async fn accept_task( } }; - // Get Quic auth identifier + // Get the right source address in case an unsepecified IP (i.e. 
0.0.0.0 or [::]) is used + let src_addr = match quic_conn.local_ip() { + Some(ip) => SocketAddr::new(ip, src_addr.port()), + None => { + tracing::debug!("Can not accept QUIC connection: empty local IP"); + continue; + } + }; let dst_addr = quic_conn.remote_address(); + // Get Quic auth identifier let auth_id = get_cert_common_name(&quic_conn)?; tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); @@ -438,7 +454,9 @@ async fn accept_task( fn get_cert_common_name(conn: &quinn::Connection) -> ZResult { let mut auth_id = QuicAuthId { auth_value: None }; if let Some(pi) = conn.peer_identity() { - let serv_certs = pi.downcast::>().unwrap(); + let serv_certs = pi + .downcast::>() + .unwrap(); if let Some(item) = serv_certs.iter().next() { let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); let subject_name = cert diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs index f8c151cdd7..b5cc7c49f8 100644 --- a/io/zenoh-links/zenoh-link-quic/src/utils.rs +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -// use rustls_pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}; use std::{ fs::File, io, @@ -21,21 +20,22 @@ use std::{ }; use rustls::{ - server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, - OwnedTrustAnchor, PrivateKey, RootCertStore, ServerConfig, + pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, + server::WebPkiClientVerifier, + version::TLS13, + ClientConfig, RootCertStore, ServerConfig, }; -use rustls_pki_types::{CertificateDer, TrustAnchor}; use secrecy::ExposeSecret; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; -use zenoh_link_commons::ConfigurationInspector; +use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; use zenoh_protocol::core::{ endpoint::{Address, Config}, parameters, }; use zenoh_result::{bail, 
zerror, ZError, ZResult}; -use crate::{config::*, verify::WebPkiVerifierAnyServerName}; +use crate::config::*; #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; @@ -159,40 +159,45 @@ impl TlsServerConfig { let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; - let certs: Vec = + let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) - .map_err(|err| zerror!("Error processing server certificate: {err}."))? - .into_iter() - .map(Certificate) - .collect(); + .collect::>() + .map_err(|err| zerror!("Error processing server certificate: {err}."))?; - let mut keys: Vec = + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|err| zerror!("Error processing server key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|err| zerror!("Error processing server key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map_err(|err| zerror!("Error processing server key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing server key: {err}."))?; } if keys.is_empty() { bail!("No private key found for TLS server."); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. 
+ // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + let sc = if tls_server_client_auth { let root_cert_store = load_trust_anchors(config)?.map_or_else( || { @@ -202,17 +207,13 @@ impl TlsServerConfig { }, Ok, )?; - let client_auth = AllowAnyAuthenticatedClient::new(root_cert_store); - ServerConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13])? - .with_client_cert_verifier(Arc::new(client_auth)) + let client_auth = WebPkiClientVerifier::builder(root_cert_store.into()).build()?; + ServerConfig::builder_with_protocol_versions(&[&TLS13]) + .with_client_cert_verifier(client_auth) .with_single_cert(certs, keys.remove(0)) .map_err(|e| zerror!(e))? } else { ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() .with_single_cert(certs, keys.remove(0)) .map_err(|e| zerror!(e))? @@ -270,68 +271,60 @@ impl TlsClientConfig { // Allows mixed user-generated CA and webPKI CA tracing::debug!("Loading default Web PKI certificates."); let mut root_cert_store = RootCertStore { - roots: webpki_roots::TLS_SERVER_ROOTS - .iter() - .map(|ta| ta.to_owned()) - .map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.map(|nc| nc.to_vec()), - ) - }) - .collect(), + roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), }; if let Some(custom_root_cert) = load_trust_anchors(config)? 
{ tracing::debug!("Loading user-generated certificates."); - root_cert_store.roots.extend(custom_root_cert.roots); + root_cert_store.extend(custom_root_cert.roots); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + let cc = if tls_client_server_auth { tracing::debug!("Loading client authentication key and certificate..."); let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; - let certs: Vec = + let certs: Vec = rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) - .map_err(|err| zerror!("Error processing client certificate: {err}."))? - .into_iter() - .map(Certificate) - .collect(); + .collect::>() + .map_err(|err| zerror!("Error processing client certificate: {err}."))?; - let mut keys: Vec = + let mut keys: Vec = rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|err| zerror!("Error processing client key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; if keys.is_empty() { keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|err| zerror!("Error processing client key: {err}."))? 
- .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; } if keys.is_empty() { keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map_err(|err| zerror!("Error processing client key: {err}."))? - .into_iter() - .map(PrivateKey) - .collect(); + .map(|x| x.map(PrivateKeyDer::from)) + .collect::>() + .map_err(|err| zerror!("Error processing client key: {err}."))?; } if keys.is_empty() { bail!("No private key found for TLS client."); } - let builder = ClientConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13])?; + let builder = ClientConfig::builder_with_protocol_versions(&[&TLS13]); if tls_server_name_verification { builder @@ -339,6 +332,7 @@ impl TlsClientConfig { .with_client_auth_cert(certs, keys.remove(0)) } else { builder + .dangerous() .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( root_cert_store, ))) @@ -346,17 +340,14 @@ impl TlsClientConfig { } .map_err(|e| zerror!("Bad certificate/key: {}", e))? } else { - let builder = ClientConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13])?; - + let builder = ClientConfig::builder(); if tls_server_name_verification { builder .with_root_certificates(root_cert_store) .with_no_client_auth() } else { builder + .dangerous() .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( root_cert_store, ))) @@ -387,30 +378,19 @@ impl TlsClientConfig { } } -fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { +fn process_pem(pem: &mut dyn io::BufRead) -> ZResult>> { let certs: Vec = rustls_pemfile::certs(pem) - .map_err(|err| zerror!("Error processing PEM certificates: {err}."))? 
- .into_iter() - .map(CertificateDer::from) - .collect(); + .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: {err}."))) + .collect::, ZError>>()?; - let trust_anchors: Vec = certs + let trust_anchors: Vec = certs .into_iter() .map(|cert| { anchor_from_trusted_cert(&cert) .map_err(|err| zerror!("Error processing trust anchor: {err}.")) .map(|trust_anchor| trust_anchor.to_owned()) }) - .collect::, ZError>>()? - .into_iter() - .map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.map(|nc| nc.to_vec()), - ) - }) - .collect(); + .collect::, ZError>>()?; Ok(trust_anchors) } @@ -471,7 +451,7 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { let mut pem = BufReader::new(value.as_bytes()); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.roots.extend(trust_anchors); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } @@ -479,14 +459,14 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { let certificate_pem = base64_decode(b64_certificate)?; let mut pem = BufReader::new(certificate_pem.as_slice()); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.roots.extend(trust_anchors); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { let mut pem = BufReader::new(File::open(filename)?); let trust_anchors = process_pem(&mut pem)?; - root_cert_store.roots.extend(trust_anchors); + root_cert_store.extend(trust_anchors); return Ok(Some(root_cert_store)); } Ok(None) diff --git a/io/zenoh-links/zenoh-link-quic/src/verify.rs b/io/zenoh-links/zenoh-link-quic/src/verify.rs deleted file mode 100644 index 544d7c8a65..0000000000 --- a/io/zenoh-links/zenoh-link-quic/src/verify.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::time::SystemTime; - -use 
rustls::{client::verify_server_cert_signed_by_trust_anchor, server::ParsedCertificate}; -use tokio_rustls::rustls::{ - client::{ServerCertVerified, ServerCertVerifier}, - Certificate, RootCertStore, ServerName, -}; - -impl ServerCertVerifier for WebPkiVerifierAnyServerName { - /// Will verify the certificate is valid in the following ways: - /// - Signed by a trusted `RootCertStore` CA - /// - Not Expired - fn verify_server_cert( - &self, - end_entity: &Certificate, - intermediates: &[Certificate], - _server_name: &ServerName, - _scts: &mut dyn Iterator, - _ocsp_response: &[u8], - now: SystemTime, - ) -> Result { - let cert = ParsedCertificate::try_from(end_entity)?; - verify_server_cert_signed_by_trust_anchor(&cert, &self.roots, intermediates, now)?; - Ok(ServerCertVerified::assertion()) - } -} - -/// `ServerCertVerifier` that verifies that the server is signed by a trusted root, but allows any serverName -/// see the trait impl for more information. -pub struct WebPkiVerifierAnyServerName { - roots: RootCertStore, -} - -#[allow(unreachable_pub)] -impl WebPkiVerifierAnyServerName { - /// Constructs a new `WebPkiVerifierAnyServerName`. - /// - /// `roots` is the set of trust anchors to trust for issuing server certs. - pub fn new(roots: RootCertStore) -> Self { - Self { roots } - } -} diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 1c42e805bb..5c4d086c5b 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -415,6 +415,15 @@ async fn accept_task( res = accept(&socket) => { match res { Ok((stream, dst_addr)) => { + // Get the right source address in case an unsepecified IP (i.e. 
0.0.0.0 or [::]) is used + let src_addr = match stream.local_addr() { + Ok(sa) => sa, + Err(e) => { + tracing::debug!("Can not accept TCP connection: {}", e); + continue; + } + }; + tracing::debug!("Accepted TCP connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastTcp::new(stream, src_addr, dst_addr)); diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 188651d90d..41847a1577 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -394,6 +394,15 @@ async fn accept_task( res = accept(&socket) => { match res { Ok((tcp_stream, dst_addr)) => { + // Get the right source address in case an unsepecified IP (i.e. 0.0.0.0 or [::]) is used + let src_addr = match tcp_stream.local_addr() { + Ok(sa) => sa, + Err(e) => { + tracing::debug!("Can not accept TLS connection: {}", e); + continue; + } + }; + // Accept the TLS connection let tls_stream = match acceptor.accept(tcp_stream).await { Ok(stream) => TlsStream::Server(stream), diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index 421c5817f2..d6fde3d243 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -190,6 +190,16 @@ impl TlsServerConfig { bail!("No private key found for TLS server."); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). 
+ .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + let sc = if tls_server_client_auth { let root_cert_store = load_trust_anchors(config)?.map_or_else( || { @@ -271,6 +281,16 @@ impl TlsClientConfig { root_cert_store.extend(custom_root_cert.roots); } + // Install ring based rustls CryptoProvider. + rustls::crypto::ring::default_provider() + // This can be called successfully at most once in any process execution. + // Call this early in your process to configure which provider is used for the provider. + // The configuration should happen before any use of ClientConfig::builder() or ServerConfig::builder(). + .install_default() + // Ignore the error here, because `rustls::crypto::ring::default_provider().install_default()` will inevitably be executed multiple times + // when there are multiple quic links, and all but the first execution will fail. + .ok(); + let cc = if tls_client_server_auth { tracing::debug!("Loading client authentication key and certificate..."); let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 0e3a5f26dc..3386ca387c 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -45,7 +45,7 @@ use zenoh_result::{zerror, ZResult}; /// /// # Note /// -/// The theoretical Maximum Transmission Unit (MTU) of UDP is `u16::MAX`. From that we substract the +/// The theoretical Maximum Transmission Unit (MTU) of UDP is `u16::MAX`. From that we subtract the /// size of a UDP header (8 bytes) and the size of IPv4/IPv6 headers (resp. 20 and 40 bytes). 
/// /// Although in IPv6 it is possible to have UDP datagrams of size greater than 65,535 bytes via IPv6 diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index 280f5eb203..1ab1fbb398 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -283,7 +283,7 @@ impl LinkManagerMulticastUdp { .map_err(|e| zerror!("{}: {}", mcast_addr, e))?; } } - IpAddr::V6(src_ip6) => bail!("{}: unexepcted IPv6 source address", src_ip6), + IpAddr::V6(src_ip6) => bail!("{}: unexpected IPv6 source address", src_ip6), }, IpAddr::V6(dst_ip6) => { // Join default multicast group diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 50f3af03ba..e67e821363 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -509,6 +509,10 @@ async fn accept_read_task( tracing::trace!("Ready to accept UDP connections on: {:?}", src_addr); + if src_addr.ip().is_unspecified() { + tracing::warn!("Interceptors (e.g. Access Control, Downsampling) are not guaranteed to work on UDP when listening on 0.0.0.0 or [::]. Their usage is discouraged. 
See https://github.com/eclipse-zenoh/zenoh/issues/1126."); + } + loop { // Buffers for deserialization let mut buff = zenoh_buffers::vec::uninit(UDP_MAX_MTU as usize); diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 5e61c7903b..df93b9cc61 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -84,12 +84,12 @@ impl Invitation { } async fn expect(expected_suffix: u32, pipe: &mut PipeR) -> ZResult<()> { - let recived_suffix = Self::receive(pipe).await?; - if recived_suffix != expected_suffix { + let received_suffix = Self::receive(pipe).await?; + if received_suffix != expected_suffix { bail!( "Suffix mismatch: expected {} got {}", expected_suffix, - recived_suffix + received_suffix ) } Ok(()) @@ -247,7 +247,7 @@ async fn handle_incoming_connections( // read invitation from the request channel let suffix = Invitation::receive(request_channel).await?; - // gererate uplink and downlink names + // generate uplink and downlink names let (dedicated_downlink_path, dedicated_uplink_path) = get_dedicated_pipe_names(path_downlink, path_uplink, suffix); @@ -255,10 +255,10 @@ async fn handle_incoming_connections( let mut dedicated_downlink = PipeW::new(&dedicated_downlink_path).await?; let mut dedicated_uplink = PipeR::new(&dedicated_uplink_path, access_mode).await?; - // confirm over the dedicated chanel + // confirm over the dedicated channel Invitation::confirm(suffix, &mut dedicated_downlink).await?; - // got confirmation over the dedicated chanel + // got confirmation over the dedicated channel Invitation::expect(suffix, &mut dedicated_uplink).await?; // create Locators @@ -356,7 +356,7 @@ async fn create_pipe( // generate random suffix let suffix: u32 = rand::thread_rng().gen(); - // gererate uplink and downlink names + // generate uplink and downlink names let (path_downlink, path_uplink) = 
get_dedicated_pipe_names(path_downlink, path_uplink, suffix); // try create uplink and downlink pipes to ensure that the selected suffix is available @@ -393,7 +393,7 @@ impl UnicastPipeClient { // listener owns the request channel, so failure of this call means that there is nobody listening on the provided endpoint let mut request_channel = PipeW::new(&path_uplink).await?; - // create dedicated channel prerequisities. The creation code also ensures that nobody else would use the same channel concurrently + // create dedicated channel prerequisites. The creation code also ensures that nobody else would use the same channel concurrently let ( mut dedicated_downlink, dedicated_suffix, @@ -401,10 +401,10 @@ impl UnicastPipeClient { dedicated_uplink_path, ) = dedicate_pipe(&path_uplink, &path_downlink, access_mode).await?; - // invite the listener to our dedicated channel over the requet channel + // invite the listener to our dedicated channel over the request channel Invitation::send(dedicated_suffix, &mut request_channel).await?; - // read responce that should be sent over the dedicated channel, confirming that everything is OK + // read response that should be sent over the dedicated channel, confirming that everything is OK // on the listener's side and it is already working with the dedicated channel Invitation::expect(dedicated_suffix, &mut dedicated_downlink).await?; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 4795838ba3..dfad4d9833 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -377,7 +377,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { e })?; - // Update the endpoint with the acutal local path + // Update the endpoint with the actual local path endpoint = EndPoint::new( endpoint.protocol(), local_path_str, diff --git 
a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 2b6424725a..193c9a1724 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -307,7 +307,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { async fn new_link(&self, endpoint: EndPoint) -> ZResult { let dst_url = get_ws_url(endpoint.address()).await?; - let (stream, _) = tokio_tungstenite::connect_async(&dst_url) + let (stream, _) = tokio_tungstenite::connect_async(dst_url.as_str()) .await .map_err(|e| { zerror!( @@ -509,6 +509,15 @@ async fn accept_task( _ = token.cancelled() => break, }; + // Get the right source address in case an unsepecified IP (i.e. 0.0.0.0 or [::]) is used + let src_addr = match stream.local_addr() { + Ok(sa) => sa, + Err(e) => { + tracing::debug!("Can not accept TCP connection: {}", e); + continue; + } + }; + tracing::debug!( "Accepted TCP (WebSocket) connection on {:?}: {:?}", src_addr, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index b91acdc7ff..9a58aafd5d 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -328,7 +328,7 @@ impl WBatch { }) .map_err(|_| zerror!("Compression error"))?; - // Verify wether the resulting compressed data is smaller than the initial input + // Verify whether the resulting compressed data is smaller than the initial input if support.len() < self.buffer.len() { Ok(Finalize::Buffer) } else { diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 349f9ed560..e497199010 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -50,7 +50,6 @@ use crate::common::batch::BatchConfig; type NanoSeconds = u32; const RBLEN: usize = QueueSizeConf::MAX; -const TSLOT: NanoSeconds = 100; // Inner structure to reuse serialization batches struct StageInRefill { @@ -258,7 +257,7 
@@ impl StageIn { // Treat all messages as non-droppable once we start fragmenting batch = zgetbatch_rets!(true, tch.sn.set(sn).unwrap()); - // Serialize the message fragmnet + // Serialize the message fragment match batch.encode((&mut reader, &mut fragment)) { Ok(_) => { // Update the SN @@ -354,6 +353,7 @@ enum Pull { // Inner structure to keep track and signal backoff operations #[derive(Clone)] struct Backoff { + tslot: NanoSeconds, retry_time: NanoSeconds, last_bytes: BatchSize, bytes: Arc, @@ -361,8 +361,9 @@ struct Backoff { } impl Backoff { - fn new(bytes: Arc, backoff: Arc) -> Self { + fn new(tslot: NanoSeconds, bytes: Arc, backoff: Arc) -> Self { Self { + tslot, retry_time: 0, last_bytes: 0, bytes, @@ -372,7 +373,7 @@ impl Backoff { fn next(&mut self) { if self.retry_time == 0 { - self.retry_time = TSLOT; + self.retry_time = self.tslot; self.backoff.store(true, Ordering::Relaxed); } else { match self.retry_time.checked_mul(2) { @@ -390,7 +391,7 @@ impl Backoff { } } - fn stop(&mut self) { + fn reset(&mut self) { self.retry_time = 0; self.backoff.store(false, Ordering::Relaxed); } @@ -407,7 +408,6 @@ impl StageOutIn { #[inline] fn try_pull(&mut self) -> Pull { if let Some(batch) = self.s_out_r.pull() { - self.backoff.stop(); return Pull::Some(batch); } @@ -419,41 +419,26 @@ impl StageOutIn { let old_bytes = self.backoff.last_bytes; self.backoff.last_bytes = new_bytes; - match new_bytes.cmp(&old_bytes) { - std::cmp::Ordering::Equal => { - // No new bytes have been written on the batch, try to pull - if let Ok(mut g) = self.current.try_lock() { - // First try to pull from stage OUT - if let Some(batch) = self.s_out_r.pull() { - self.backoff.stop(); + if new_bytes == old_bytes { + // It seems no new bytes have been written on the batch, try to pull + if let Ok(mut g) = self.current.try_lock() { + // First try to pull from stage OUT to make sure we are not in the case + // where new_bytes == old_bytes are because of two identical serializations + if let 
Some(batch) = self.s_out_r.pull() { + return Pull::Some(batch); + } + + // An incomplete (non-empty) batch may be available in the state IN pipeline. + match g.take() { + Some(batch) => { return Pull::Some(batch); } - - // An incomplete (non-empty) batch is available in the state IN pipeline. - match g.take() { - Some(batch) => { - self.backoff.stop(); - return Pull::Some(batch); - } - None => { - self.backoff.stop(); - return Pull::None; - } + None => { + return Pull::None; } } - // Go to backoff - } - std::cmp::Ordering::Less => { - // There should be a new batch in Stage OUT - if let Some(batch) = self.s_out_r.pull() { - self.backoff.stop(); - return Pull::Some(batch); - } - // Go to backoff - } - std::cmp::Ordering::Greater => { - // Go to backoff } + // Go to backoff } // Do backoff @@ -576,7 +561,7 @@ impl TransmissionPipeline { s_in: StageOutIn { s_out_r, current, - backoff: Backoff::new(bytes, backoff), + backoff: Backoff::new(config.backoff.as_nanos() as NanoSeconds, bytes, backoff), }, s_ref: StageOutRefill { n_ref_w, s_ref_w }, }); @@ -664,6 +649,11 @@ pub(crate) struct TransmissionPipelineConsumer { impl TransmissionPipelineConsumer { pub(crate) async fn pull(&mut self) -> Option<(WBatch, usize)> { + // Reset backoff before pulling + for queue in self.stage_out.iter_mut() { + queue.s_in.backoff.reset(); + } + while self.active.load(Ordering::Relaxed) { // Calculate the backoff maximum let mut bo = NanoSeconds::MAX; @@ -681,10 +671,29 @@ impl TransmissionPipelineConsumer { } } + // In case of writing many small messages, `recv_async()` will most likely return immedietaly. + // While trying to pull from the queue, the stage_in `lock()` will most likely taken, leading to + // a spinning behaviour while attempting to take the lock. Yield the current task to avoid + // spinning the current task indefinitely. 
+ tokio::task::yield_now().await; + // Wait for the backoff to expire or for a new message - let _ = + let res = tokio::time::timeout(Duration::from_nanos(bo as u64), self.n_out_r.recv_async()) .await; + match res { + Ok(Ok(())) => { + // We have received a notification from the channel that some bytes are available, retry to pull. + } + Ok(Err(_channel_error)) => { + // The channel is closed, we can't be notified anymore. Break the loop and return None. + break; + } + Err(_timeout) => { + // The backoff timeout expired. Be aware that tokio timeout may not sleep for short duration since + // it has time resolution of 1ms: https://docs.rs/tokio/latest/tokio/time/fn.sleep.html + } + } } None } diff --git a/io/zenoh-transport/src/common/seq_num.rs b/io/zenoh-transport/src/common/seq_num.rs index f286d14741..ecbfd8a944 100644 --- a/io/zenoh-transport/src/common/seq_num.rs +++ b/io/zenoh-transport/src/common/seq_num.rs @@ -57,7 +57,7 @@ impl SeqNum { /// - 16_386 (i.e., 2^14) /// - 2_097_152 (i.e., 2^21) /// - /// This funtion will panic if `value` is out of bound w.r.t. `resolution`. That is if + /// This function will panic if `value` is out of bound w.r.t. `resolution`. That is if /// `value` is greater or equal than `resolution`. /// pub(crate) fn make(value: TransportSn, resolution: Bits) -> ZResult { @@ -179,7 +179,7 @@ impl SeqNumGenerator { /// As a consequence of wire zenoh's representation of sequence numbers /// this should be a multiple of 7. /// - /// This funtion will panic if `value` is out of bound w.r.t. `resolution`. That is if + /// This function will panic if `value` is out of bound w.r.t. `resolution`. That is if /// `value` is greater or equal than `resolution`. 
/// pub(crate) fn make(initial_sn: TransportSn, resolution: Bits) -> ZResult { diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index deba9cf6f6..0d8c29ea9d 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -80,7 +80,7 @@ use crate::multicast::manager::{ /// .lease(Duration::from_secs(1)) /// .keep_alive(4) // Send a KeepAlive every 250 ms /// .accept_timeout(Duration::from_secs(1)) -/// .accept_pending(10) // Set to 10 the number of simultanous pending incoming transports +/// .accept_pending(10) // Set to 10 the number of simultaneous pending incoming transports /// .max_sessions(5); // Allow max 5 transports open /// let mut resolution = Resolution::default(); /// resolution.set(Field::FrameSN, Bits::U8); diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 382109be0b..794d36d9e7 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -344,7 +344,7 @@ impl TransportLinkMulticastUniversal { ) .await; if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("TX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); @@ -380,7 +380,7 @@ impl TransportLinkMulticastUniversal { .await; c_signal.trigger(); if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("RX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index ee8e024bb6..9a6cdb0d4d 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -65,7 +65,7 @@ impl 
TransportMulticastInner { || join.ext_qos.is_some() != peer.is_qos() { let e = format!( - "Ingoring Join on {} of peer: {}. Inconsistent parameters.", + "Ignoring Join on {} of peer: {}. Inconsistent parameters.", peer.locator, peer.zid, ); tracing::debug!("{}", e); @@ -83,7 +83,7 @@ impl TransportMulticastInner { ) -> ZResult<()> { if zread!(self.peers).len() >= self.manager.config.multicast.max_sessions { tracing::debug!( - "Ingoring Join on {} from peer: {}. Max sessions reached: {}.", + "Ignoring Join on {} from peer: {}. Max sessions reached: {}.", locator, join.zid, self.manager.config.multicast.max_sessions, @@ -93,7 +93,7 @@ impl TransportMulticastInner { if join.version != self.manager.config.version { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported version: {}. Expected: {}.", + "Ignoring Join on {} from peer: {}. Unsupported version: {}. Expected: {}.", locator, join.zid, join.version, @@ -104,7 +104,7 @@ impl TransportMulticastInner { if join.resolution != self.manager.config.resolution { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", + "Ignoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", locator, join.zid, join.resolution, @@ -115,7 +115,7 @@ impl TransportMulticastInner { if join.batch_size != batch_size { tracing::debug!( - "Ingoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", + "Ignoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", locator, join.zid, join.batch_size, @@ -126,7 +126,7 @@ impl TransportMulticastInner { if !self.manager.config.multicast.is_qos && join.ext_qos.is_some() { tracing::debug!( - "Ingoring Join on {} from peer: {}. QoS is not supported.", + "Ignoring Join on {} from peer: {}. 
QoS is not supported.", locator, join.zid, ); diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 57a5eb1602..f3a053aa63 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -449,7 +449,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Verify that the cookie is the one we sent if input.cookie_nonce != cookie.nonce { - let e = zerror!("Rejecting OpenSyn on: {}. Unkwown cookie.", self.link); + let e = zerror!("Rejecting OpenSyn on: {}. Unknown cookie.", self.link); return Err((e.into(), Some(close::reason::INVALID))); } diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 73d2e61398..c602dcf806 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -262,17 +262,19 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { drop(guard); // create a callback to start the link - let start_link = Box::new(move || { + let start_tx = Box::new(move || { // start keepalive task let keep_alive = self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; self.start_keepalive(keep_alive); + }); + let start_rx = Box::new(move || { // start RX task self.internal_start_rx(other_lease); }); - Ok((start_link, ack)) + Ok((start_tx, start_rx, ack)) } /*************************************/ diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 9686de1ef7..bff221323e 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -100,7 +100,7 @@ pub struct TransportManagerParamsUnicast { pub struct TransportManagerBuilderUnicast { // NOTE: In order to consider eventual packet loss and transmission latency and jitter, // set the actual keep_alive 
timeout to one fourth of the lease time. - // This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + // This is in-line with the ITU-T G.8013/Y.1731 specification on continuous connectivity // check which considers a link as failed when no messages are received in 3.5 times the // target interval. pub(super) lease: Duration, @@ -450,7 +450,7 @@ impl TransportManager { } // Add the link to the transport - let (start_tx_rx, ack) = transport + let (start_tx, start_rx, ack) = transport .add_link(link, other_initial_sn, other_lease) .await .map_err(InitTransportError::Link)?; @@ -462,10 +462,12 @@ impl TransportManager { .await .map_err(|e| InitTransportError::Transport((e, c_t, close::reason::GENERIC)))?; + start_tx(); + // notify transport's callback interface that there is a new link Self::notify_new_link_unicast(&transport, c_link); - start_tx_rx(); + start_rx(); Ok(transport) } @@ -554,7 +556,8 @@ impl TransportManager { }; // Add the link to the transport - let (start_tx_rx, ack) = match t.add_link(link, other_initial_sn, other_lease).await { + let (start_tx, start_rx, ack) = match t.add_link(link, other_initial_sn, other_lease).await + { Ok(val) => val, Err(e) => { let _ = t.close(e.2).await; @@ -581,6 +584,8 @@ impl TransportManager { guard.insert(config.zid, t.clone()); drop(guard); + start_tx(); + // Notify manager's interface that there is a new transport transport_error!( self.notify_new_transport_unicast(&t), @@ -590,7 +595,7 @@ impl TransportManager { // Notify transport's callback interface that there is a new link Self::notify_new_link_unicast(&t, c_link); - start_tx_rx(); + start_rx(); zcondfeat!( "shared-memory", @@ -752,13 +757,17 @@ impl TransportManager { let c_manager = self.clone(); self.task_controller .spawn_with_rt(zenoh_runtime::ZRuntime::Acceptor, async move { - if let Err(e) = tokio::time::timeout( + if tokio::time::timeout( c_manager.config.unicast.accept_timeout, 
super::establishment::accept::accept_link(link, &c_manager), ) .await + .is_err() { - tracing::debug!("{}", e); + tracing::debug!( + "Failed to accept link before deadline ({}ms)", + c_manager.config.unicast.accept_timeout.as_millis() + ); } incoming_counter.fetch_sub(1, SeqCst); }); diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index 0ea3b979ca..0a84e5e753 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -37,8 +37,14 @@ pub(crate) enum InitTransportError { Transport(TransportError), } -pub(crate) type AddLinkResult<'a> = - Result<(Box, MaybeOpenAck), LinkError>; +pub(crate) type AddLinkResult<'a> = Result< + ( + Box, + Box, + MaybeOpenAck, + ), + LinkError, +>; pub(crate) type InitTransportResult = Result, InitTransportError>; /*************************************/ diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index e0c3cd3db5..07de4fb744 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -99,7 +99,7 @@ impl TransportLinkUnicastUniversal { .await; if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("TX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle // TODO(yuyuan): do more study to check which ZRuntime should be used or refine the @@ -127,7 +127,7 @@ impl TransportLinkUnicastUniversal { // TODO(yuyuan): improve this callback if let Err(e) = res { - tracing::debug!("{}", e); + tracing::debug!("RX task failed: {}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 
eab047460f..47f2ff344c 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -297,17 +297,21 @@ impl TransportUnicastTrait for TransportUnicastUniversal { // create a callback to start the link let transport = self.clone(); - let start_link = Box::new(move || { + let mut c_link = link.clone(); + let c_transport = transport.clone(); + let start_tx = Box::new(move || { // Start the TX loop let keep_alive = self.manager.config.unicast.lease / self.manager.config.unicast.keep_alive as u32; - link.start_tx(transport.clone(), consumer, keep_alive); + c_link.start_tx(c_transport, consumer, keep_alive); + }); + let start_rx = Box::new(move || { // Start the RX loop link.start_rx(transport, other_lease); }); - Ok((start_link, ack)) + Ok((start_tx, start_rx, ack)) } /*************************************/ diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index b49b863991..a0fabe1ffd 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -43,7 +43,7 @@ use zenoh_transport::{ TransportPeerEventHandler, }; -// These keys and certificates below are purposedly generated to run TLS and mTLS tests. +// These keys and certificates below are purposely generated to run TLS and mTLS tests. // // With 2 way authentication (mTLS), using TLS 1.3, we need two pairs of keys and certificates: one // for the "server" and another one for the "client". 
diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 9f5e9bb25a..0d710d9942 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -90,7 +90,7 @@ impl Default for ReplicaConfig { // This will determine the time upto which replicas might be diverged // This can be different for each replica if not used to compute hot and warm publication_interval: Duration::from_secs(5), - // This indicates the uncertainity due to the network + // This indicates the uncertainty due to the network // The messages might still be in transit in the network propagation_delay: Duration::from_millis(200), // This is the chunk that you would like your data to be divide into in time. diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index fac516b7b8..d59d764004 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -90,7 +90,7 @@ //! //! async fn put(&mut self, key: Option, value: Value, timestamp: Timestamp) -> ZResult { //! // the key will be None if it exactly matched with the strip_prefix -//! // create a storge specific special structure to store it +//! // create a storage specific special structure to store it //! // Store the data with timestamp //! // @TODO: //! // store (key, value, timestamp) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 69557af614..6ea19ce25c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -227,7 +227,7 @@ impl StorageRuntimeInner { config.volume_id ); // let _ = async_std::task::block_on(storage.send(StorageMessage::Stop)); - let _ = storage.send(StorageMessage::Stop); // TODO: was previosuly spawning a task. do we need that? + let _ = storage.send(StorageMessage::Stop); // TODO: was previously spawning a task. do we need that? 
} } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 95e8726cf4..9d12dbd599 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -298,7 +298,7 @@ impl StorageService { && self.is_latest(&k, sample.timestamp().unwrap()).await)) { tracing::trace!( - "Sample `{:?}` identified as neded processing for key {}", + "Sample `{:?}` identified as needed processing for key {}", sample, k ); diff --git a/plugins/zenoh-plugin-trait/src/lib.rs b/plugins/zenoh-plugin-trait/src/lib.rs index 6d9ac35fe9..b9dbb455ab 100644 --- a/plugins/zenoh-plugin-trait/src/lib.rs +++ b/plugins/zenoh-plugin-trait/src/lib.rs @@ -25,13 +25,13 @@ //! //! The actual work of the plugin is performed by the instance, which is created by the [`start`](Plugin::start) function. //! -//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping pluign is just dropping it's instance. +//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping plugin is just dropping it's instance. //! //! Plugins can be static and dynamic. //! //! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`](crate::manager::PluginsManager) by [`PluginsManager::add_static_plugin`](crate::manager::PluginsManager::add_static_plugin) method. //! -//! Dynamic pluign is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. +//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. 
These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. //! mod compatibility; mod manager; diff --git a/plugins/zenoh-plugin-trait/src/vtable.rs b/plugins/zenoh-plugin-trait/src/vtable.rs index e1108f87f1..74c7479c3e 100644 --- a/plugins/zenoh-plugin-trait/src/vtable.rs +++ b/plugins/zenoh-plugin-trait/src/vtable.rs @@ -48,7 +48,7 @@ impl PluginVTable { /// This macro adds non-mangled functions which provides plugin version and loads it into the host. /// If plugin library should work also as static, consider calling this macro under feature condition /// -/// The funcitons declared by this macro are: +/// The functions declared by this macro are: /// /// - `get_plugin_loader_version` - returns `PLUGIN_LOADER_VERSION` const of the crate. The [`PluginsManager`](crate::manager::PluginsManager) /// will check if this version is compatible with the host. diff --git a/zenoh-ext/examples/examples/README.md b/zenoh-ext/examples/examples/README.md index 892bded1cb..498a1ca6fe 100644 --- a/zenoh-ext/examples/examples/README.md +++ b/zenoh-ext/examples/examples/README.md @@ -17,7 +17,7 @@ ### z_pub_cache - Declares a publisher and an assiciated publication cache with a given key expression. + Declares a publisher and an associated publication cache with a given key expression. All the publications are locally cached (with a configurable history size - i.e. max number of cached data per resource). The cache can be queried by a QueryingSubscriber at startup (see next example). Typical usage: diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 0c5a60751b..ae4a73112b 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -62,7 +62,7 @@ struct Args { /// The number of publications to keep in cache. history: usize, #[arg(short = 'o', long)] - /// Set `complete` option to true. 
This means that this queryable is ulitmate data source, no need to scan other queryables. + /// Set `complete` option to true. This means that this queryable is ultimate data source, no need to scan other queryables. complete: bool, #[arg(short = 'x', long)] /// An optional queryable prefix. diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index f415ffe5be..9120a323ae 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -//! To manage groups and group memeberships +//! To manage groups and group memberships use std::{ collections::HashMap, diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 279d071ee5..b50f5affb4 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -210,7 +210,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cache content + // on query, reply with cached content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.key_expr().as_str().contains('*') { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 54f3ff0224..e26de62ae0 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -106,7 +106,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle self.callback(locked(callback)) } - /// Use the given handler to recieve Samples. + /// Use the given handler to receive Samples. #[inline] pub fn with( self, @@ -590,9 +590,9 @@ where } } -/// A Subscriber that will run the given user defined `fetch` funtion at startup. +/// A Subscriber that will run the given user defined `fetch` function at startup. 
/// -/// The user defined `fetch` funtion should fetch some samples and return them through the callback funtion +/// The user defined `fetch` function should fetch some samples and return them through the callback function /// (it could typically be a Session::get()). Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// @@ -731,7 +731,7 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// Perform an additional `fetch`. /// - /// The provided `fetch` funtion should fetch some samples and return them through the callback funtion + /// The provided `fetch` function should fetch some samples and return them through the callback function /// (it could typically be a Session::get()). Those samples will be merged with the received publications and made available in the receiver. /// /// # Examples diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 3a52c04170..bac334035d 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -50,8 +50,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. 
/// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the `FetchingSubscriber` is to retrieve publications that were made in the past, but stored in some zenoh Storage. @@ -128,8 +128,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. + /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the `FetchingSubscriber` is to retrieve publications that were made in the past, but stored in some zenoh Storage. @@ -237,8 +237,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// /// This operation returns a [`FetchingSubscriberBuilder`](FetchingSubscriberBuilder) that can be used to finely configure the subscriber. /// As soon as built (calling `.wait()` or `.await` on the `FetchingSubscriberBuilder`), the `FetchingSubscriber` - /// will run the given `fetch` funtion. The user defined `fetch` funtion should fetch some samples and return them - /// through the callback funtion. Those samples will be merged with the received publications and made available in the receiver. 
+ /// will run the given `fetch` function. The user defined `fetch` function should fetch some samples and return them + /// through the callback function. Those samples will be merged with the received publications and made available in the receiver. /// Later on, new fetches can be performed again, calling [`FetchingSubscriber::fetch()`](super::FetchingSubscriber::fetch()). /// /// A typical usage of the fetching liveliness subscriber is to retrieve existing liveliness tokens while susbcribing to diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 97b3c22fe4..923689d0bc 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -283,7 +283,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } - // internal function for perfroming the publication + // internal function for performing the publication fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index d911fc8b65..d8a9ec7feb 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -78,7 +78,7 @@ impl ZBytes { Self(t.into()) } - /// Returns wether the ZBytes is empty or not. + /// Returns whether the ZBytes is empty or not. pub fn is_empty(&self) -> bool { self.0.is_empty() } diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index d23429ee0e..ac0c5c5ad2 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -61,8 +61,8 @@ use super::bytes::ZBytes; /// ### Schema /// /// Additionally, a schema can be associated to the encoding. -/// The convetions is to use the `;` separator if an encoding is created from a string. -/// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a schme to one of the associated constants. +/// The conventions is to use the `;` separator if an encoding is created from a string. 
+/// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a scheme to one of the associated constants. /// ``` /// use zenoh::encoding::Encoding; /// @@ -89,7 +89,7 @@ impl Encoding { id: 0, schema: None, }); - /// A VLE-encoded signed little-endian integer. Either 8bit, 16bit, 32bit, or 64bit. Binary reprensentation uses two's complement. + /// A VLE-encoded signed little-endian integer. Either 8bit, 16bit, 32bit, or 64bit. Binary representation uses two's complement. /// /// Constant alias for string: `"zenoh/int"`. pub const ZENOH_INT: Encoding = Self(zenoh_protocol::core::Encoding { @@ -253,7 +253,7 @@ impl Encoding { id: 23, schema: None, }); - /// A Web Protable (WebP) image. + /// A Web Portable (WebP) image. /// /// Constant alias for string: `"image/webp"`. pub const IMAGE_WEBP: Encoding = Self(zenoh_protocol::core::Encoding { diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 8f0680897e..4a53a60851 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -160,7 +160,7 @@ impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { } /// Struct returned by [`Session::info()`](crate::SessionDeclarations::info) which allows -/// to access informations about the current zenoh [`Session`](crate::Session). +/// to access information about the current zenoh [`Session`](crate::Session). /// /// # Examples /// ``` diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 18b3e2ca0c..c6ece3f129 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -51,7 +51,7 @@ pub(crate) enum KeyExprInner<'a> { /// A possibly-owned version of [`keyexpr`] that may carry optimisations for use with a [`Session`] that may have declared it. /// -/// Check [`keyexpr`]'s documentation for detailed explainations of the Key Expression Language. +/// Check [`keyexpr`]'s documentation for detailed explanations of the Key Expression Language. 
#[repr(transparent)] #[derive(Clone, serde::Deserialize, serde::Serialize)] #[serde(from = "OwnedKeyExpr")] @@ -200,7 +200,7 @@ impl<'a> KeyExpr<'a> { /// Joins both sides, inserting a `/` in between them. /// - /// This should be your prefered method when concatenating path segments. + /// This should be your preferred method when concatenating path segments. /// /// This is notably useful for workspaces: /// ```rust @@ -234,7 +234,7 @@ impl<'a> KeyExpr<'a> { /// Performs string concatenation and returns the result as a [`KeyExpr`] if possible. /// - /// You should probably prefer [`KeyExpr::join`] as Zenoh may then take advantage of the hierachical separation it inserts. + /// You should probably prefer [`KeyExpr::join`] as Zenoh may then take advantage of the hierarchical separation it inserts. pub fn concat + ?Sized>(&self, s: &S) -> ZResult> { let s = s.as_ref(); self._concat(s) diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 63519eac2b..cfa53edc44 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -101,14 +101,14 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" /// and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" /// as they doesn't match the query. - /// * `Err(ZError)`: Problem occured when processing the query. + /// * `Err(ZError)`: Problem occurred when processing the query. /// /// If plugin implements subplugins (as the storage plugin), then it should also reply with information about its subplugins with the same rules. 
/// /// TODO: /// * add example - /// * rework the admin space: rework "with_extented_string" function, provide it as utility for plugins - /// * reorder paramaters: plugin_status_key should be first as it describes the root of pluginb's admin space + /// * rework the admin space: rework "with_extended_string" function, provide it as utility for plugins + /// * reorder parameters: plugin_status_key should be first as it describes the root of pluginb's admin space /// * Instead of ZResult return just Vec. Check, do we really need ZResult? If yes, make it separate for each status record. /// fn adminspace_getter<'a>( diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 96cedc960f..6b581ccfad 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -194,7 +194,7 @@ impl<'a> Publisher<'a> { /// pointer to it (`Arc`). This is equivalent to `Arc::new(Publisher)`. /// /// This is useful to share ownership of the `Publisher` between several threads - /// and tasks. It also alows to create [`MatchingListener`] with static + /// and tasks. It also allows to create [`MatchingListener`] with static /// lifetime that can be moved to several threads and tasks. /// /// Note: the given zenoh `Publisher` will be undeclared when the last reference to diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 41317b8b43..220785c668 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -133,7 +133,7 @@ impl DataInfoIntoSample for Option { } } -/// Informations on the source of a zenoh [`Sample`]. +/// Information on the source of a zenoh [`Sample`]. 
#[zenoh_macros::unstable] #[derive(Debug, Clone)] pub struct SourceInfo { diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index fe98cce6a6..e328761cb5 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -131,7 +131,7 @@ pub trait ZenohParameters { const TIME_RANGE_KEY: &'static str = "_time"; /// Sets the time range targeted by the selector parameters. fn set_time_range>>(&mut self, time_range: T); - /// Sets the parameter allowing to receieve replies from queryables not matching + /// Sets the parameter allowing to receive replies from queryables not matching /// the requested key expression. This may happen in this scenario: /// - we are requesting keyexpr `a/b`. /// - queryable is declared to handle `a/*` queries and contains data for `a/b` and `a/c`. diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index e874cd2393..abf2e52f8c 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -514,7 +514,7 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also alows to create [`Subscriber`](Subscriber) and + /// and tasks. It also allows to create [`Subscriber`](Subscriber) and /// [`Queryable`](Queryable) with static lifetime that can be moved to several /// threads and tasks /// @@ -630,7 +630,7 @@ impl Session { /// The returned configuration [`Notifier`](Notifier) can be used to read the current /// zenoh configuration through the `get` function or /// modify the zenoh configuration through the `insert`, - /// or `insert_json5` funtion. + /// or `insert_json5` function. 
/// /// # Examples /// ### Read current zenoh configuration @@ -1675,7 +1675,7 @@ impl Session { } } Err(err) => { - tracing::error!("Received Data for unkown key_expr: {}", err); + tracing::error!("Received Data for unknown key_expr: {}", err); return; } } @@ -1923,7 +1923,7 @@ impl Session { ) } Err(err) => { - error!("Received Query for unkown key_expr: {}", err); + error!("Received Query for unknown key_expr: {}", err); return; } } @@ -2159,7 +2159,7 @@ impl Primitives for Session { } Err(err) => { tracing::error!( - "Received DeclareSubscriber for unkown wire_expr: {}", + "Received DeclareSubscriber for unknown wire_expr: {}", err ) } @@ -2174,7 +2174,7 @@ impl Primitives for Session { if let Some(expr) = state.remote_subscribers.remove(&m.id) { self.update_status_down(&state, &expr); } else { - tracing::error!("Received Undeclare Subscriber for unkown id: {}", m.id); + tracing::error!("Received Undeclare Subscriber for unknown id: {}", m.id); } } } @@ -2231,7 +2231,7 @@ impl Primitives for Session { } } Err(err) => { - tracing::error!("Received DeclareToken for unkown wire_expr: {}", err) + tracing::error!("Received DeclareToken for unknown wire_expr: {}", err) } } } @@ -2283,7 +2283,7 @@ impl Primitives for Session { } Err(err) => { tracing::error!( - "Received UndeclareToken for unkown wire_expr: {}", + "Received UndeclareToken for unknown wire_expr: {}", err ) } @@ -2381,7 +2381,7 @@ impl Primitives for Session { callback(new_reply); } None => { - tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for unknown Query: {}", msg.rid); } } } @@ -2390,7 +2390,7 @@ impl Primitives for Session { let key_expr = match state.remote_key_to_expr(&msg.wire_expr) { Ok(key) => key.into_owned(), Err(e) => { - error!("Received ReplyData for unkown key_expr: {}", e); + error!("Received ReplyData for unknown key_expr: {}", e); return; } }; @@ -2545,7 +2545,7 @@ impl Primitives for Session { } } None => { - 
tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for unknown Query: {}", msg.rid); } } } @@ -2570,7 +2570,7 @@ impl Primitives for Session { } } None => { - warn!("Received ResponseFinal for unkown Request: {}", msg.rid); + warn!("Received ResponseFinal for unknown Request: {}", msg.rid); } } } @@ -2732,7 +2732,7 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` #[zenoh_macros::unstable] fn liveliness(&'s self) -> Liveliness<'a>; - /// Get informations about the zenoh [`Session`](Session). + /// Get information about the zenoh [`Session`](Session). /// /// # Examples /// ``` diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e07b8b6806..c75e31aa3a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -45,7 +45,7 @@ //! ``` //! //! ### Subscribe -//! The example below shows how to consume values for a key expresison. +//! The example below shows how to consume values for a key expressions. //! ```no_run //! use futures::prelude::*; //! use zenoh::prelude::*; diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index 56bbbe4570..59111e5441 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -74,9 +74,21 @@ impl TransportPeerEventHandler for DeMux { NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), NetworkBody::OAM(m) => { if let Some(transport) = self.transport.as_ref() { + let mut declares = vec![]; let ctrl_lock = zlock!(self.face.tables.ctrl_lock); let mut tables = zwrite!(self.face.tables.tables); - ctrl_lock.handle_oam(&mut tables, &self.face.tables, m, transport)? 
+ ctrl_lock.handle_oam( + &mut tables, + &self.face.tables, + m, + transport, + &mut |p, m| declares.push((p.clone(), m)), + )?; + drop(tables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } } } @@ -91,9 +103,17 @@ impl TransportPeerEventHandler for DeMux { fn closing(&self) { self.face.send_close(); if let Some(transport) = self.transport.as_ref() { + let mut declares = vec![]; let ctrl_lock = zlock!(self.face.tables.ctrl_lock); let mut tables = zwrite!(self.face.tables.tables); - let _ = ctrl_lock.closing(&mut tables, &self.face.tables, transport); + let _ = ctrl_lock.closing(&mut tables, &self.face.tables, transport, &mut |p, m| { + declares.push((p.clone(), m)) + }); + drop(tables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 4627a40654..bbc910b124 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -215,6 +215,7 @@ impl Primitives for Face { fn send_interest(&self, msg: zenoh_protocol::network::Interest) { let ctrl_lock = zlock!(self.tables.ctrl_lock); if msg.mode != InterestMode::Final { + let mut declares = vec![]; declare_interest( ctrl_lock.as_ref(), &self.tables, @@ -223,7 +224,12 @@ impl Primitives for Face { msg.wire_expr.as_ref(), msg.mode, msg.options, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } else { undeclare_interest( ctrl_lock.as_ref(), @@ -232,7 +238,6 @@ impl Primitives for Face { msg.id, ); } - drop(ctrl_lock); } fn send_declare(&self, msg: zenoh_protocol::network::Declare) { @@ -245,6 +250,7 @@ impl Primitives for Face { unregister_expr(&self.tables, &mut self.state.clone(), m.id); } zenoh_protocol::network::DeclareBody::DeclareSubscriber(m) => { + let mut declares = vec![]; declare_subscription( ctrl_lock.as_ref(), &self.tables, @@ -253,9 +259,15 @@ impl 
Primitives for Face { &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { + let mut declares = vec![]; undeclare_subscription( ctrl_lock.as_ref(), &self.tables, @@ -263,9 +275,15 @@ impl Primitives for Face { m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { + let mut declares = vec![]; declare_queryable( ctrl_lock.as_ref(), &self.tables, @@ -274,9 +292,15 @@ impl Primitives for Face { &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { + let mut declares = vec![]; undeclare_queryable( ctrl_lock.as_ref(), &self.tables, @@ -284,9 +308,15 @@ impl Primitives for Face { m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::DeclareToken(m) => { + let mut declares = vec![]; declare_token( ctrl_lock.as_ref(), &self.tables, @@ -295,9 +325,15 @@ impl Primitives for Face { &m.wire_expr, msg.ext_nodeid.node_id, msg.interest_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::UndeclareToken(m) => { + let mut declares = vec![]; undeclare_token( ctrl_lock.as_ref(), &self.tables, @@ -305,7 +341,12 @@ impl Primitives for Face { m.id, &m.ext_wire_expr, msg.ext_nodeid.node_id, + &mut |p, m| declares.push((p.clone(), m)), ); + drop(ctrl_lock); + for (p, m) in 
declares { + p.send_declare(m); + } } zenoh_protocol::network::DeclareBody::DeclareFinal(_) => { if let Some(id) = msg.interest_id { @@ -314,7 +355,10 @@ impl Primitives for Face { .entry(id) .and_modify(|interest| interest.finalized = true); - declare_final(&mut self.state.clone(), id); + let mut declares = vec![]; + declare_final(&mut self.state.clone(), id, &mut |p, m| { + declares.push((p.clone(), m)) + }); // recompute routes // TODO: disable routes and recompute them in parallel to avoid holding @@ -323,10 +367,15 @@ impl Primitives for Face { let mut root_res = wtables.root_res.clone(); update_data_routes_from(&mut wtables, &mut root_res); update_query_routes_from(&mut wtables, &mut root_res); + + drop(wtables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } } } - drop(ctrl_lock); } #[inline] diff --git a/zenoh/src/net/routing/dispatcher/interests.rs b/zenoh/src/net/routing/dispatcher/interests.rs index 32724363f9..d088b1f3f6 100644 --- a/zenoh/src/net/routing/dispatcher/interests.rs +++ b/zenoh/src/net/routing/dispatcher/interests.rs @@ -36,7 +36,7 @@ use super::{ tables::{register_expr_interest, TablesLock}, }; use crate::net::routing::{ - hat::HatTrait, + hat::{HatTrait, SendDeclare}, router::{unregister_expr_interest, Resource}, RoutingContext, }; @@ -48,22 +48,33 @@ pub(crate) struct CurrentInterest { pub(crate) src_interest_id: InterestId, } -pub(crate) fn declare_final(face: &mut Arc, id: InterestId) { +pub(crate) fn declare_final( + face: &mut Arc, + id: InterestId, + send_declare: &mut SendDeclare, +) { if let Some(interest) = get_mut_unchecked(face) .pending_current_interests .remove(&id) { - finalize_pending_interest(interest); + finalize_pending_interest(interest, send_declare); } } -pub(crate) fn finalize_pending_interests(_tables_ref: &TablesLock, face: &mut Arc) { +pub(crate) fn finalize_pending_interests( + _tables_ref: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { for (_, interest) in 
get_mut_unchecked(face).pending_current_interests.drain() { - finalize_pending_interest(interest); + finalize_pending_interest(interest, send_declare); } } -pub(crate) fn finalize_pending_interest(interest: (Arc, CancellationToken)) { +pub(crate) fn finalize_pending_interest( + interest: (Arc, CancellationToken), + send_declare: &mut SendDeclare, +) { let (interest, cancellation_token) = interest; cancellation_token.cancel(); if let Some(interest) = Arc::into_inner(interest) { @@ -72,17 +83,16 @@ pub(crate) fn finalize_pending_interest(interest: (Arc, Cancell interest.src_face, interest.src_interest_id ); - interest - .src_face - .primitives - .clone() - .send_declare(RoutingContext::new(Declare { + send_declare( + &interest.src_face.primitives, + RoutingContext::new(Declare { interest_id: Some(interest.src_interest_id), ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareFinal(DeclareFinal), - })); + }), + ); } } @@ -134,12 +144,13 @@ impl Timed for CurrentInterestCleanup { face, Duration::from_millis(INTEREST_TIMEOUT_MS), ); - finalize_pending_interest(interest); + finalize_pending_interest(interest, &mut |p, m| p.send_declare(m)); } } } } +#[allow(clippy::too_many_arguments)] pub(crate) fn declare_interest( hat_code: &(dyn HatTrait + Send + Sync), tables_ref: &Arc, @@ -148,6 +159,7 @@ pub(crate) fn declare_interest( expr: Option<&WireExpr>, mode: InterestMode, options: InterestOptions, + send_declare: &mut SendDeclare, ) { if options.keyexprs() && mode != InterestMode::Current { register_expr_interest(tables_ref, face, id, expr); @@ -199,6 +211,7 @@ pub(crate) fn declare_interest( Some(&mut res), mode, options, + send_declare, ); } None => tracing::error!( @@ -210,7 +223,16 @@ pub(crate) fn declare_interest( } } else { let mut wtables = zwrite!(tables_ref.tables); - hat_code.declare_interest(&mut wtables, tables_ref, face, id, None, mode, options); + hat_code.declare_interest( + &mut wtables, + 
tables_ref, + face, + id, + None, + mode, + options, + send_declare, + ); } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 5d9ab69a92..84c8433a48 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -31,8 +31,9 @@ use super::{ }; #[zenoh_macros::unstable] use crate::key_expr::KeyExpr; -use crate::net::routing::hat::HatTrait; +use crate::net::routing::hat::{HatTrait, SendDeclare}; +#[allow(clippy::too_many_arguments)] pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, @@ -41,6 +42,7 @@ pub(crate) fn declare_subscription( expr: &WireExpr, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { let rtables = zread!(tables.tables); match rtables @@ -76,7 +78,15 @@ pub(crate) fn declare_subscription( (res, wtables) }; - hat_code.declare_subscription(&mut wtables, face, id, &mut res, sub_info, node_id); + hat_code.declare_subscription( + &mut wtables, + face, + id, + &mut res, + sub_info, + node_id, + send_declare, + ); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -109,6 +119,7 @@ pub(crate) fn undeclare_subscription( id: SubscriberId, expr: &WireExpr, node_id: NodeId, + send_declare: &mut SendDeclare, ) { tracing::debug!("Undeclare subscription {}", face); let res = if expr.is_empty() { @@ -139,7 +150,9 @@ pub(crate) fn undeclare_subscription( } }; let mut wtables = zwrite!(tables.tables); - if let Some(mut res) = hat_code.undeclare_subscription(&mut wtables, face, id, res, node_id) { + if let Some(mut res) = + hat_code.undeclare_subscription(&mut wtables, face, id, res, node_id, send_declare) + { tracing::debug!("{} Undeclare subscriber {} ({})", face, id, res.expr()); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -222,7 +235,7 @@ pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc) { pub(crate) fn 
update_data_routes_from(tables: &mut Tables, res: &mut Arc) { update_data_routes(tables, res); let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { + for child in res.children.values_mut() { update_data_routes_from(tables, child); } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index f1163c829d..6ce9046a4a 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -43,13 +43,17 @@ use super::{ resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}, tables::{NodeId, RoutingExpr, Tables, TablesLock}, }; -use crate::net::routing::{hat::HatTrait, RoutingContext}; +use crate::net::routing::{ + hat::{HatTrait, SendDeclare}, + RoutingContext, +}; pub(crate) struct Query { src_face: Arc, src_qid: RequestId, } +#[allow(clippy::too_many_arguments)] pub(crate) fn declare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, @@ -58,6 +62,7 @@ pub(crate) fn declare_queryable( expr: &WireExpr, qabl_info: &QueryableInfoType, node_id: NodeId, + send_declare: &mut SendDeclare, ) { let rtables = zread!(tables.tables); match rtables @@ -93,7 +98,15 @@ pub(crate) fn declare_queryable( (res, wtables) }; - hat_code.declare_queryable(&mut wtables, face, id, &mut res, qabl_info, node_id); + hat_code.declare_queryable( + &mut wtables, + face, + id, + &mut res, + qabl_info, + node_id, + send_declare, + ); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -126,6 +139,7 @@ pub(crate) fn undeclare_queryable( id: QueryableId, expr: &WireExpr, node_id: NodeId, + send_declare: &mut SendDeclare, ) { let res = if expr.is_empty() { None @@ -155,7 +169,9 @@ pub(crate) fn undeclare_queryable( } }; let mut wtables = zwrite!(tables.tables); - if let Some(mut res) = hat_code.undeclare_queryable(&mut wtables, face, id, res, node_id) { + if let Some(mut res) = + hat_code.undeclare_queryable(&mut wtables, face, id, res, 
node_id, send_declare) + { tracing::debug!("{} Undeclare queryable {} ({})", face, id, res.expr()); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -238,7 +254,7 @@ pub(crate) fn update_query_routes(tables: &Tables, res: &Arc) { pub(crate) fn update_query_routes_from(tables: &mut Tables, res: &mut Arc) { update_query_routes(tables, res); let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { + for child in res.children.values_mut() { update_query_routes_from(tables, child); } } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index a638c9a24f..b829709bf2 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -176,7 +176,7 @@ pub struct Resource { pub(crate) parent: Option>, pub(crate) suffix: String, pub(crate) nonwild_prefix: Option<(Arc, String)>, - pub(crate) childs: HashMap>, + pub(crate) children: HashMap>, pub(crate) context: Option, pub(crate) session_ctxs: HashMap>, } @@ -211,7 +211,7 @@ impl Resource { parent: Some(parent.clone()), suffix: String::from(suffix), nonwild_prefix, - childs: HashMap::new(), + children: HashMap::new(), context, session_ctxs: HashMap::new(), } @@ -295,7 +295,7 @@ impl Resource { parent: None, suffix: String::from(""), nonwild_prefix: None, - childs: HashMap::new(), + children: HashMap::new(), context: None, session_ctxs: HashMap::new(), }) @@ -305,7 +305,7 @@ impl Resource { let mut resclone = res.clone(); let mutres = get_mut_unchecked(&mut resclone); if let Some(ref mut parent) = mutres.parent { - if Arc::strong_count(res) <= 3 && res.childs.is_empty() { + if Arc::strong_count(res) <= 3 && res.children.is_empty() { // consider only childless resource held by only one external object (+ 1 strong count for resclone, + 1 strong count for res.parent to a total of 3 ) tracing::debug!("Unregister resource {}", res.expr()); if let Some(context) = mutres.context.as_mut() { @@ 
-322,7 +322,7 @@ impl Resource { } mutres.nonwild_prefix.take(); { - get_mut_unchecked(parent).childs.remove(&res.suffix); + get_mut_unchecked(parent).children.remove(&res.suffix); } Resource::clean(parent); } @@ -331,11 +331,11 @@ impl Resource { pub fn close(self: &mut Arc) { let r = get_mut_unchecked(self); - for c in r.childs.values_mut() { + for c in r.children.values_mut() { Self::close(c); } r.parent.take(); - r.childs.clear(); + r.children.clear(); r.nonwild_prefix.take(); r.session_ctxs.clear(); } @@ -344,7 +344,7 @@ impl Resource { pub fn print_tree(from: &Arc) -> String { let mut result = from.expr(); result.push('\n'); - for child in from.childs.values() { + for child in from.children.values() { result.push_str(&Resource::print_tree(child)); } result @@ -364,7 +364,7 @@ impl Resource { None => (suffix, ""), }; - match get_mut_unchecked(from).childs.get_mut(chunk) { + match get_mut_unchecked(from).children.get_mut(chunk) { Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); @@ -373,7 +373,7 @@ impl Resource { } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) - .childs + .children .insert(String::from(chunk), new); res } @@ -389,7 +389,7 @@ impl Resource { None => (suffix, ""), }; - match get_mut_unchecked(from).childs.get_mut(chunk) { + match get_mut_unchecked(from).children.get_mut(chunk) { Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); @@ -398,7 +398,7 @@ impl Resource { } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) - .childs + .children .insert(String::from(chunk), new); res } @@ -418,7 +418,7 @@ impl Resource { None => (suffix, ""), }; - match from.childs.get(chunk) { + match from.children.get(chunk) { Some(res) => Resource::get_resource(res, rest), None => None, } @@ -431,7 +431,7 @@ impl Resource { None => (suffix, ""), }; - 
match from.childs.get(chunk) { + match from.children.get(chunk) { Some(res) => Resource::get_resource(res, rest), None => None, } @@ -527,11 +527,11 @@ impl Resource { prefix: &Arc, suffix: &'a str, sid: usize, - checkchilds: bool, + checkclildren: bool, ) -> WireExpr<'a> { - if checkchilds && !suffix.is_empty() { + if checkclildren && !suffix.is_empty() { let (chunk, rest) = suffix.split_at(suffix.find('/').unwrap_or(suffix.len())); - if let Some(child) = prefix.childs.get(chunk) { + if let Some(child) = prefix.children.get(chunk) { return get_best_key_(child, rest, sid, true); } } @@ -565,7 +565,7 @@ impl Resource { if from.context.is_some() { matches.push(Arc::downgrade(from)); } - for child in from.childs.values() { + for child in from.children.values() { recursive_push(child, matches) } } @@ -575,7 +575,7 @@ impl Resource { matches: &mut Vec>, ) { if from.parent.is_none() || from.suffix == "/" { - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(key_expr, child, matches); } return; @@ -597,12 +597,12 @@ impl Resource { matches.push(Arc::downgrade(from)); } if suffix.as_bytes() == b"**" { - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(key_expr, child, matches) } } if let Some(child) = - from.childs.get("/**").or_else(|| from.childs.get("**")) + from.children.get("/**").or_else(|| from.children.get("**")) { if child.context.is_some() { matches.push(Arc::downgrade(child)) @@ -614,7 +614,7 @@ impl Resource { Some(rest) => { let recheck_keyexpr_one_level_lower = chunk.as_bytes() == b"**" || suffix.as_bytes() == b"**"; - for child in from.childs.values() { + for child in from.children.values() { get_matches_from(rest, child, matches); if recheck_keyexpr_one_level_lower { get_matches_from(key_expr, child, matches) diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 73d80d567d..2c5cfffffb 100644 --- 
a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -175,9 +175,16 @@ pub fn close_face(tables: &TablesLock, face: &Weak) { tracing::debug!("Close {}", face); face.task_controller.terminate_all(Duration::from_secs(10)); finalize_pending_queries(tables, &mut face); + let mut declares = vec![]; let ctrl_lock = zlock!(tables.ctrl_lock); - finalize_pending_interests(tables, &mut face); - ctrl_lock.close_face(tables, &mut face); + finalize_pending_interests(tables, &mut face, &mut |p, m| { + declares.push((p.clone(), m)) + }); + ctrl_lock.close_face(tables, &mut face, &mut |p, m| declares.push((p.clone(), m))); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } } None => tracing::error!("Face already closed!"), } diff --git a/zenoh/src/net/routing/dispatcher/token.rs b/zenoh/src/net/routing/dispatcher/token.rs index c563ce8802..a34e35af68 100644 --- a/zenoh/src/net/routing/dispatcher/token.rs +++ b/zenoh/src/net/routing/dispatcher/token.rs @@ -27,8 +27,12 @@ use super::{ face::FaceState, tables::{NodeId, TablesLock}, }; -use crate::net::routing::{hat::HatTrait, router::Resource}; +use crate::net::routing::{ + hat::{HatTrait, SendDeclare}, + router::Resource, +}; +#[allow(clippy::too_many_arguments)] pub(crate) fn declare_token( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, @@ -37,6 +41,7 @@ pub(crate) fn declare_token( expr: &WireExpr, node_id: NodeId, interest_id: Option, + send_declare: &mut SendDeclare, ) { let rtables = zread!(tables.tables); match rtables @@ -72,7 +77,15 @@ pub(crate) fn declare_token( (res, wtables) }; - hat_code.declare_token(&mut wtables, face, id, &mut res, node_id, interest_id); + hat_code.declare_token( + &mut wtables, + face, + id, + &mut res, + node_id, + interest_id, + send_declare, + ); drop(wtables); } None => tracing::error!( @@ -91,6 +104,7 @@ pub(crate) fn undeclare_token( id: TokenId, expr: &ext::WireExprType, node_id: NodeId, + send_declare: &mut 
SendDeclare, ) { let (res, mut wtables) = if expr.wire_expr.is_empty() { (None, zwrite!(tables.tables)) @@ -138,7 +152,8 @@ pub(crate) fn undeclare_token( } }; - if let Some(res) = hat_code.undeclare_token(&mut wtables, face, id, res, node_id) { + if let Some(res) = hat_code.undeclare_token(&mut wtables, face, id, res, node_id, send_declare) + { tracing::debug!("{} Undeclare token {} ({})", face, id, res.expr()); } else { tracing::error!("{} Undeclare unknown token {}", face, id); diff --git a/zenoh/src/net/routing/hat/client/interests.rs b/zenoh/src/net/routing/hat/client/interests.rs index 57e380ee12..b890e800f2 100644 --- a/zenoh/src/net/routing/hat/client/interests.rs +++ b/zenoh/src/net/routing/hat/client/interests.rs @@ -31,7 +31,7 @@ use crate::net::routing::{ resource::Resource, tables::{Tables, TablesLock}, }, - hat::{CurrentFutureTrait, HatInterestTrait}, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, RoutingContext, }; @@ -81,6 +81,7 @@ impl HatInterestTrait for HatCode { res: Option<&mut Arc>, mode: InterestMode, options: InterestOptions, + send_declare: &mut SendDeclare, ) { if options.tokens() { declare_token_interest( @@ -90,6 +91,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } face_hat_mut!(face) @@ -146,26 +148,28 @@ impl HatInterestTrait for HatCode { interest.src_face, interest.src_interest_id ); - interest - .src_face - .primitives - .clone() - .send_declare(RoutingContext::new(Declare { + send_declare( + &interest.src_face.primitives, + RoutingContext::new(Declare { interest_id: Some(interest.src_interest_id), ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareFinal(DeclareFinal), - })); + }), + ); } } else { - face.primitives.send_declare(RoutingContext::new(Declare { - interest_id: Some(id), - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, 
- body: DeclareBody::DeclareFinal(DeclareFinal), - })); + send_declare( + &face.primitives, + RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); } } } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 0cbdd6d4bc..230449bb9f 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -44,7 +44,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use crate::net::{ routing::{ @@ -103,11 +103,12 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()> { interests_new_face(tables, &mut face.state); - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); - token_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -117,15 +118,21 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, face: &mut Face, _transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { interests_new_face(tables, &mut face.state); - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); - token_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = 
zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -156,7 +163,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -178,7 +185,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -199,7 +206,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); @@ -238,6 +245,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, _oam: Oam, _transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { Ok(()) } @@ -257,6 +265,7 @@ impl HatBaseTrait for HatCode { _tables: &mut Tables, _tables_ref: &Arc, _transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { Ok(()) } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 41dae88cdf..886333f92c 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -35,7 +35,7 @@ use crate::{ resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, - 
hat::{HatPubSubTrait, Sources}, + hat::{HatPubSubTrait, SendDeclare, Sources}, router::{update_data_routes_from, RoutesIndexes}, RoutingContext, }, @@ -48,6 +48,7 @@ fn propagate_simple_subscription_to( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if src_face.id != dst_face.id && !face_hat!(dst_face).local_subs.contains_key(res) @@ -56,20 +57,23 @@ fn propagate_simple_subscription_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } @@ -78,6 +82,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { for mut dst_face in tables .faces @@ -85,7 +90,14 @@ fn propagate_simple_subscription( .cloned() .collect::>>() { - propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + send_declare, + ); } } @@ -123,10 +135,11 @@ fn declare_client_subscription( id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, id, res, sub_info); - 
propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); // This introduced a buffer overflow on windows // @TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -164,22 +177,29 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -188,6 +208,7 @@ pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -196,24 +217,27 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } if client_subs.len() == 1 { let face = &mut client_subs[0]; if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -223,16 +247,21 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); + undeclare_client_subscription(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; @@ -243,7 +272,14 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .collect::>>() { for sub in face_hat!(src_face).remote_subs.values() { - propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + send_declare, + ); } } // recompute routes @@ -259,8 +295,9 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, id, res, sub_info); + 
declare_client_subscription(tables, face, id, res, sub_info, send_declare); } fn undeclare_subscription( @@ -270,8 +307,9 @@ impl HatPubSubTrait for HatCode { id: SubscriberId, _res: Option>, _node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { - forget_client_subscription(tables, face, id) + forget_client_subscription(tables, face, id, send_declare) } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index cebc04cd2f..12e594500f 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -40,7 +40,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{HatQueriesTrait, SendDeclare, Sources}, router::RoutesIndexes, RoutingContext, }; @@ -80,6 +80,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let faces = tables.faces.values().cloned(); for mut dst_face in faces { @@ -105,20 +106,23 @@ fn propagate_simple_queryable( .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); println!("Decled key = {key_expr:?}"); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, 
+ res.expr(), + ), + ); } } } @@ -149,9 +153,10 @@ fn declare_client_queryable( id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfoType, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, id, res, qabl_info); - propagate_simple_queryable(tables, res, Some(face)); + propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] @@ -168,22 +173,29 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -192,6 +204,7 @@ pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_qabls @@ -204,26 +217,29 @@ pub(super) fn undeclare_client_queryable( let mut client_qabls = client_qabls(res); if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } else { - propagate_simple_queryable(tables, res, None); + propagate_simple_queryable(tables, res, None, send_declare); } if client_qabls.len() 
== 1 { let face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -233,16 +249,21 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); + undeclare_client_queryable(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { +pub(super) fn queries_new_face( + tables: &mut Tables, + _face: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables .faces .values() @@ -250,7 +271,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .collect::>>() { for qabl in face_hat!(face).remote_qabls.values() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + propagate_simple_queryable(tables, qabl, Some(&mut face.clone()), send_declare); } } } @@ -268,8 +289,9 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfoType, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, id, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info, send_declare); } fn 
undeclare_queryable( @@ -279,8 +301,9 @@ impl HatQueriesTrait for HatCode { id: QueryableId, _res: Option>, _node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { - forget_client_queryable(tables, face, id) + forget_client_queryable(tables, face, id, send_declare) } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/client/token.rs b/zenoh/src/net/routing/hat/client/token.rs index 3b52bad36a..58af466f00 100644 --- a/zenoh/src/net/routing/hat/client/token.rs +++ b/zenoh/src/net/routing/hat/client/token.rs @@ -26,7 +26,7 @@ use zenoh_sync::get_mut_unchecked; use super::{face_hat, face_hat_mut, HatCode, HatFace}; use crate::net::routing::{ dispatcher::{face::FaceState, tables::Tables}, - hat::{CurrentFutureTrait, HatTokenTrait}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, router::{NodeId, Resource, SessionContext}, RoutingContext, }; @@ -37,6 +37,7 @@ fn propagate_simple_token_to( dst_face: &mut Arc, res: &Arc, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) && !face_hat!(dst_face).local_tokens.contains_key(res) @@ -45,30 +46,38 @@ fn propagate_simple_token_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, 
+ res.expr(), + ), + ); } } -fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { for mut dst_face in tables .faces .values() .cloned() .collect::>>() { - propagate_simple_token_to(tables, &mut dst_face, res, src_face); + propagate_simple_token_to(tables, &mut dst_face, res, src_face, send_declare); } } @@ -105,18 +114,18 @@ fn declare_client_token( id: TokenId, res: &mut Arc, interest_id: Option, + send_declare: &mut SendDeclare, ) { register_client_token(tables, face, id, res); - propagate_simple_token(tables, res, face); + propagate_simple_token(tables, res, face, send_declare); let wire_expr = Resource::decl_key(res, face); if let Some(interest_id) = interest_id { if let Some((interest, _)) = face.pending_current_interests.get(&interest_id) { - interest - .src_face - .primitives - .send_declare(RoutingContext::with_expr( + send_declare( + &interest.src_face.primitives, + RoutingContext::with_expr( Declare { interest_id: Some(interest.src_interest_id), ext_qos: ext::QoSType::default(), @@ -125,7 +134,8 @@ fn declare_client_token( body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), }, res.expr(), - )) + ), + ) } } } @@ -144,22 +154,29 @@ fn client_tokens(res: &Arc) -> Vec> { .collect() } -fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + 
&face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } else if face_hat!(face) .remote_interests .values() @@ -167,21 +184,24 @@ fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { { // Token has never been declared on this face. // Send an Undeclare with a one shot generated id and a WireExpr ext. - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), - ext_wire_expr: WireExprType { - wire_expr: Resource::get_best_key(res, "", face.id), - }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); } } } @@ -190,6 +210,7 @@ pub(super) fn undeclare_client_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_tokens @@ -202,25 +223,28 @@ pub(super) fn undeclare_client_token( let mut client_tokens = client_tokens(res); if client_tokens.is_empty() { - propagate_forget_simple_token(tables, res); + propagate_forget_simple_token(tables, res, send_declare); } if client_tokens.len() == 1 { let face = &mut client_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = 
face_hat_mut!(face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -232,19 +256,24 @@ fn forget_client_token( face: &mut Arc, id: TokenId, res: Option>, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res); + undeclare_client_token(tables, face, &mut res, send_declare); Some(res) } else if let Some(mut res) = res { - undeclare_client_token(tables, face, &mut res); + undeclare_client_token(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn token_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn token_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { for src_face in tables .faces .values() @@ -252,7 +281,7 @@ pub(super) fn token_new_face(tables: &mut Tables, face: &mut Arc) { .collect::>>() { for token in face_hat!(src_face).remote_tokens.values() { - propagate_simple_token_to(tables, face, token, &mut src_face.clone()); + propagate_simple_token_to(tables, face, token, &mut src_face.clone(), send_declare); } } } @@ -264,6 +293,7 @@ pub(crate) fn declare_token_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() { let interest_id = (!mode.future()).then_some(id); @@ -283,16 +313,19 @@ 
pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); } } else { for src_face in tables @@ -311,16 +344,22 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - res.expr(), - )) + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr, + }), + }, + res.expr(), + ), + ) } } } @@ -341,16 +380,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr 
}), + }, + token.expr(), + ), + ); } } } @@ -366,8 +408,9 @@ impl HatTokenTrait for HatCode { res: &mut Arc, _node_id: NodeId, interest_id: Option, + send_declare: &mut SendDeclare, ) { - declare_client_token(tables, face, id, res, interest_id); + declare_client_token(tables, face, id, res, interest_id, send_declare); } fn undeclare_token( @@ -377,7 +420,8 @@ impl HatTokenTrait for HatCode { id: TokenId, res: Option>, _node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { - forget_client_token(tables, face, id, res) + forget_client_token(tables, face, id, res, send_declare) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/interests.rs b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs index 40bfb49780..6b23f8d657 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/interests.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs @@ -30,7 +30,7 @@ use crate::net::routing::{ resource::Resource, tables::{Tables, TablesLock}, }, - hat::{CurrentFutureTrait, HatInterestTrait}, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, RoutingContext, }; @@ -44,6 +44,7 @@ impl HatInterestTrait for HatCode { res: Option<&mut Arc>, mode: InterestMode, options: InterestOptions, + send_declare: &mut SendDeclare, ) { if options.subscribers() { declare_sub_interest( @@ -53,6 +54,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if options.queryables() { @@ -63,6 +65,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if options.tokens() { @@ -73,6 +76,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if mode.future() { @@ -81,13 +85,16 @@ impl HatInterestTrait for HatCode { .insert(id, (res.cloned(), options)); } if mode.current() { - face.primitives.send_declare(RoutingContext::new(Declare { - 
interest_id: Some(id), - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareFinal(DeclareFinal), - })); + send_declare( + &face.primitives, + RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 500ac29510..1dd4e65318 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -51,7 +51,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use crate::net::{ codec::Zenoh080Routing, @@ -156,12 +156,12 @@ impl HatTables { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); - let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs); - queries::queries_tree_change(&mut tables, &new_childs); - token::token_tree_change(&mut tables, &new_childs); + pubsub::pubsub_tree_change(&mut tables, &new_children); + queries::queries_tree_change(&mut tables, &new_children); + token::token_tree_change(&mut tables, &new_children); tracing::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; @@ -222,6 +222,7 @@ impl HatBaseTrait for HatCode { _tables: &mut Tables, _tables_ref: &Arc, _face: &mut Face, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { // Nothing to do Ok(()) @@ -233,6 +234,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> 
ZResult<()> { let link_id = if face.state.whatami != WhatAmI::Client { if let Some(net) = hat_mut!(tables).peers_net.as_mut() { @@ -252,7 +254,12 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -283,7 +290,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -305,7 +312,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -326,7 +333,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); @@ -365,6 +372,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { if oam.id == OAM_LINKSTATE { if let ZExtBody::ZBuf(buf) = oam.body { @@ -381,9 +389,9 @@ impl HatBaseTrait for HatCode { let changes = net.link_states(list.link_states, zid); for (_, 
removed_node) in changes.removed_nodes { - pubsub_remove_node(tables, &removed_node.zid); - queries_remove_node(tables, &removed_node.zid); - token_remove_node(tables, &removed_node.zid); + pubsub_remove_node(tables, &removed_node.zid, send_declare); + queries_remove_node(tables, &removed_node.zid, send_declare); + token_remove_node(tables, &removed_node.zid, send_declare); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -415,6 +423,7 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, tables_ref: &Arc, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { @@ -425,9 +434,9 @@ impl HatBaseTrait for HatCode { .unwrap() .remove_link(&zid) { - pubsub_remove_node(tables, &removed_node.zid); - queries_remove_node(tables, &removed_node.zid); - token_remove_node(tables, &removed_node.zid); + pubsub_remove_node(tables, &removed_node.zid, send_declare); + queries_remove_node(tables, &removed_node.zid, send_declare); + token_remove_node(tables, &removed_node.zid, send_declare); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 4ca3dcfb92..bfa7ccf969 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -109,7 +109,7 @@ pub(super) struct Changes { #[derive(Clone)] pub(super) struct Tree { pub(super) parent: Option, - pub(super) childs: Vec, + pub(super) children: Vec, pub(super) directions: Vec>, } @@ -160,7 +160,7 @@ impl Network { links: VecMap::new(), trees: vec![Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![None], }], distances: vec![0.0], @@ -902,12 +902,13 @@ impl Network { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); - let old_childs: Vec> = 
self.trees.iter().map(|t| t.childs.clone()).collect(); + let old_children: Vec> = + self.trees.iter().map(|t| t.children.clone()).collect(); self.trees.clear(); self.trees.resize_with(max_idx.index() + 1, || Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![], }); @@ -941,7 +942,7 @@ impl Network { for idx in &indexes { if let Some(parent_idx) = paths.predecessors[idx.index()] { if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); + self.trees[tree_root_idx.index()].children.push(*idx); } } } @@ -979,22 +980,22 @@ impl Network { } } - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); + let mut new_children = Vec::with_capacity(self.trees.len()); + new_children.resize(self.trees.len(), vec![]); - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { + for i in 0..new_children.len() { + new_children[i] = if i < old_children.len() { self.trees[i] - .childs + .children .iter() - .filter(|idx| !old_childs[i].contains(idx)) + .filter(|idx| !old_children[i].contains(idx)) .cloned() .collect() } else { - self.trees[i].childs.clone() + self.trees[i].children.clone() }; } - new_childs + new_children } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 49bd026a31..849921d24b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -43,22 +43,22 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + hat::{CurrentFutureTrait, HatPubSubTrait, SendDeclare, Sources}, router::RoutesIndexes, RoutingContext, }; #[inline] -fn send_sourced_subscription_to_net_childs( +fn send_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: 
&Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -99,6 +99,7 @@ fn propagate_simple_subscription_to( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id) && !face_hat!(dst_face).local_subs.contains_key(res) @@ -108,20 +109,23 @@ fn propagate_simple_subscription_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } else { let matching_interests = face_hat!(dst_face) .remote_interests @@ -142,20 +146,23 @@ fn propagate_simple_subscription_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: 
*sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } } @@ -167,6 +174,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { for mut dst_face in tables .faces @@ -174,7 +182,14 @@ fn propagate_simple_subscription( .cloned() .collect::>>() { - propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + send_declare, + ); } } @@ -189,10 +204,10 @@ fn propagate_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, sub_info, @@ -221,6 +236,7 @@ fn register_peer_subscription( res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription @@ -234,7 +250,7 @@ fn register_peer_subscription( } // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); } fn declare_peer_subscription( @@ -243,8 +259,9 @@ fn declare_peer_subscription( res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { - register_peer_subscription(tables, face, res, sub_info, peer); + register_peer_subscription(tables, face, res, sub_info, peer, send_declare); } fn 
register_client_subscription( @@ -281,10 +298,11 @@ fn declare_client_subscription( id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, id, res, sub_info); let zid = tables.zid; - register_peer_subscription(tables, face, res, sub_info, zid); + register_peer_subscription(tables, face, res, sub_info, zid, send_declare); } #[inline] @@ -318,15 +336,15 @@ fn remote_client_subs(res: &Arc, face: &Arc) -> bool { } #[inline] -fn send_forget_sourced_subscription_to_net_childs( +fn send_forget_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -359,22 +377,29 @@ fn send_forget_sourced_subscription_to_net_childs( } } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in 
face_hat!(face) .local_subs @@ -389,19 +414,22 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc }) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -418,10 +446,10 @@ fn propagate_forget_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( + send_forget_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -443,7 +471,12 @@ fn propagate_forget_sourced_subscription( } } -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { +fn unregister_peer_subscription( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -451,7 +484,7 @@ fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe .peer_subs .retain(|sub| !Arc::ptr_eq(sub, res)); - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } } @@ -460,9 +493,10 @@ fn undeclare_peer_subscription( face: Option<&Arc>, res: &mut 
Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { if res_hat!(res).peer_subs.contains(peer) { - unregister_peer_subscription(tables, res, peer); + unregister_peer_subscription(tables, res, peer, send_declare); propagate_forget_sourced_subscription(tables, res, face, peer); } } @@ -472,14 +506,16 @@ fn forget_peer_subscription( face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { - undeclare_peer_subscription(tables, Some(face), res, peer); + undeclare_peer_subscription(tables, Some(face), res, peer, send_declare); } pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -489,26 +525,29 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); let peer_subs = remote_peer_subs(tables, res); if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + undeclare_peer_subscription(tables, None, res, &tables.zid.clone(), send_declare); } if client_subs.len() == 1 && !peer_subs { let mut face = &mut client_subs[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + 
}, + res.expr(), + ), + ); } for res in face_hat!(face) .local_subs @@ -523,19 +562,24 @@ pub(super) fn undeclare_client_subscription( }) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }, + ), + }, + res.expr(), + ), + ); } } } @@ -548,16 +592,21 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); + undeclare_client_subscription(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto) { +pub(super) fn pubsub_remove_node( + tables: &mut Tables, + node: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { for mut res in hat!(tables) .peer_subs .iter() @@ -565,14 +614,14 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto) { .cloned() .collect::>>() { - unregister_peer_subscription(tables, &mut res, node); + unregister_peer_subscription(tables, &mut res, node, send_declare); update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) } } -pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec]) { +pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec]) { let 
net = match hat!(tables).peers_net.as_ref() { Some(net) => net, None => { @@ -580,9 +629,9 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -644,20 +694,23 @@ pub(super) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); } } else { for sub in &hat!(tables).peer_subs { @@ -673,20 +726,23 @@ pub(super) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } @@ -703,20 +759,23 @@ pub(super) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(sub, face); - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } @@ -732,13 +791,14 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer) + declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) } } else { - declare_client_subscription(tables, face, id, res, sub_info) + declare_client_subscription(tables, face, id, res, sub_info, send_declare) } } @@ -749,11 +809,12 @@ impl HatPubSubTrait for HatCode { id: SubscriberId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { if face.whatami != WhatAmI::Client { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, &mut res, &peer); + forget_peer_subscription(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -762,7 +823,7 @@ impl HatPubSubTrait for HatCode { None } } else { - forget_client_subscription(tables, face, id) + forget_client_subscription(tables, face, id, send_declare) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 36fc03c03d..883db69975 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs 
+++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -48,7 +48,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, SendDeclare, Sources}, router::RoutesIndexes, RoutingContext, }; @@ -120,16 +120,16 @@ fn local_qabl_info( } #[inline] -fn send_sourced_queryable_to_net_childs( +fn send_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -168,6 +168,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let faces = tables.faces.values().cloned(); for mut dst_face in faces { @@ -191,20 +192,23 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -220,10 +224,10 @@ fn propagate_sourced_queryable( match net.get_idx(source) { 
Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, qabl_info, src_face, @@ -252,6 +256,7 @@ fn register_peer_queryable( res: &mut Arc, qabl_info: &QueryableInfoType, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -266,7 +271,7 @@ fn register_peer_queryable( } // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); + propagate_simple_queryable(tables, res, face, send_declare); } fn declare_peer_queryable( @@ -275,9 +280,10 @@ fn declare_peer_queryable( res: &mut Arc, qabl_info: &QueryableInfoType, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { let face = Some(face); - register_peer_queryable(tables, face, res, qabl_info, peer); + register_peer_queryable(tables, face, res, qabl_info, peer, send_declare); } fn register_client_queryable( @@ -306,11 +312,12 @@ fn declare_client_queryable( id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfoType, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; - register_peer_queryable(tables, Some(face), res, &local_details, zid); + register_peer_queryable(tables, Some(face), res, &local_details, zid, send_declare); } #[inline] @@ -344,15 +351,15 @@ fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { } #[inline] -fn send_forget_sourced_queryable_to_net_childs( +fn send_forget_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match 
tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -385,22 +392,29 @@ fn send_forget_sourced_queryable_to_net_childs( } } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(&mut face) .local_qabls @@ -415,19 +429,22 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc { if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( + send_forget_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -469,7 +486,12 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { +fn unregister_peer_queryable( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -477,7 +499,7 @@ fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: 
.peer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } } @@ -486,9 +508,10 @@ fn undeclare_peer_queryable( face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer); + unregister_peer_queryable(tables, res, peer, send_declare); propagate_forget_sourced_queryable(tables, res, face, peer); } } @@ -498,14 +521,16 @@ fn forget_peer_queryable( face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { - undeclare_peer_queryable(tables, Some(face), res, peer); + undeclare_peer_queryable(tables, Some(face), res, peer, send_declare); } pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_qabls @@ -520,28 +545,31 @@ pub(super) fn undeclare_client_queryable( let peer_qabls = remote_peer_qabls(tables, res); if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + undeclare_peer_queryable(tables, None, res, &tables.zid.clone(), send_declare); } else { let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); + register_peer_queryable(tables, None, res, &local_info, tables.zid, send_declare); } if client_qabls.len() == 1 && !peer_qabls { let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, 
+ RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_qabls @@ -556,19 +584,22 @@ pub(super) fn undeclare_client_queryable( }) }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -580,16 +611,21 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); + undeclare_client_queryable(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto) { +pub(super) fn queries_remove_node( + tables: &mut Tables, + node: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { let mut qabls = vec![]; for res in hat!(tables).peer_qabls.iter() { for qabl in res_hat!(res).peer_qabls.keys() { @@ -599,14 +635,14 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto) { } } for mut res in qabls { - unregister_peer_queryable(tables, 
&mut res, node); + unregister_peer_queryable(tables, &mut res, node, send_declare); update_matches_query_routes(tables, &res); Resource::clean(&mut res) } } -pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec]) { +pub(super) fn queries_tree_change(tables: &mut Tables, new_children: &[Vec]) { let net = match hat!(tables).peers_net.as_ref() { Some(net) => net, None => { @@ -614,9 +650,9 @@ pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -719,20 +756,23 @@ pub(super) fn declare_qabl_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } else { for qabl in hat!(tables).peer_qabls.iter() { @@ -751,20 +791,23 @@ pub(super) fn declare_qabl_interest( 0 }; let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: 
ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } @@ -784,20 +827,23 @@ pub(super) fn declare_qabl_interest( 0 }; let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } @@ -813,13 +859,14 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfoType, node_id: NodeId, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer); + declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare); } } else { - declare_client_queryable(tables, face, id, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info, send_declare); } } @@ -830,11 +877,12 @@ impl HatQueriesTrait for HatCode { id: QueryableId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { if face.whatami != WhatAmI::Client { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, &mut res, &peer); + forget_peer_queryable(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -843,7 +891,7 @@ impl HatQueriesTrait for 
HatCode { None } } else { - forget_client_queryable(tables, face, id) + forget_client_queryable(tables, face, id, send_declare) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs b/zenoh/src/net/routing/hat/linkstate_peer/token.rs index 0085d8deb0..adb55b7bbb 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/token.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -32,21 +32,21 @@ use super::{ }; use crate::net::routing::{ dispatcher::{face::FaceState, tables::Tables}, - hat::{CurrentFutureTrait, HatTokenTrait}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, router::{NodeId, Resource, SessionContext}, RoutingContext, }; #[inline] -fn send_sourced_token_to_net_childs( +fn send_sourced_token_to_net_clildren( tables: &Tables, net: &Network, - childs: &[NodeIndex], + clildren: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in clildren { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -85,25 +85,29 @@ fn propagate_simple_token_to( dst_face: &mut Arc, res: &Arc, _src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat!(dst_face).local_tokens.contains_key(res) && dst_face.whatami == WhatAmI::Client { if dst_face.whatami != WhatAmI::Client { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: 
ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); } else { let matching_interests = face_hat!(dst_face) .remote_interests @@ -122,33 +126,41 @@ fn propagate_simple_token_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); } } } } } -fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { for mut dst_face in tables .faces .values() .cloned() .collect::>>() { - propagate_simple_token_to(tables, &mut dst_face, res, src_face); + propagate_simple_token_to(tables, &mut dst_face, res, src_face, send_declare); } } @@ -162,10 +174,10 @@ fn propagate_sourced_token( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_token_to_net_childs( + send_sourced_token_to_net_clildren( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -192,6 +204,7 @@ fn register_peer_token( face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { if 
!res_hat!(res).peer_tokens.contains(&peer) { // Register peer liveliness @@ -205,7 +218,7 @@ fn register_peer_token( } // Propagate liveliness to clients - propagate_simple_token(tables, res, face); + propagate_simple_token(tables, res, face, send_declare); } fn declare_peer_token( @@ -213,8 +226,9 @@ fn declare_peer_token( face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { - register_peer_token(tables, face, res, peer); + register_peer_token(tables, face, res, peer, send_declare); } fn register_client_token( @@ -249,10 +263,11 @@ fn declare_client_token( face: &mut Arc, id: TokenId, res: &mut Arc, + send_declare: &mut SendDeclare, ) { register_client_token(tables, face, id, res); let zid = tables.zid; - register_peer_token(tables, face, res, zid); + register_peer_token(tables, face, res, zid, send_declare); } #[inline] @@ -286,15 +301,15 @@ fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { } #[inline] -fn send_forget_sourced_token_to_net_childs( +fn send_forget_sourced_token_to_net_clildren( tables: &Tables, net: &Network, - childs: &[NodeIndex], + clildren: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in clildren { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -327,22 +342,29 @@ fn send_forget_sourced_token_to_net_childs( } } -fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: 
WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_tokens @@ -357,19 +379,22 @@ fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -386,10 +411,10 @@ fn propagate_forget_sourced_token( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_token_to_net_childs( + send_forget_sourced_token_to_net_clildren( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -411,7 +436,12 @@ fn propagate_forget_sourced_token( } } -fn unregister_peer_token(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { +fn unregister_peer_token( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { res_hat_mut!(res).peer_tokens.retain(|token| token != peer); if res_hat!(res).peer_tokens.is_empty() { @@ -419,7 
+449,7 @@ fn unregister_peer_token(tables: &mut Tables, res: &mut Arc, peer: &Ze .peer_tokens .retain(|token| !Arc::ptr_eq(token, res)); - propagate_forget_simple_token(tables, res); + propagate_forget_simple_token(tables, res, send_declare); } } @@ -428,9 +458,10 @@ fn undeclare_peer_token( face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { if res_hat!(res).peer_tokens.contains(peer) { - unregister_peer_token(tables, res, peer); + unregister_peer_token(tables, res, peer, send_declare); propagate_forget_sourced_token(tables, res, face, peer); } } @@ -440,14 +471,16 @@ fn forget_peer_token( face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { - undeclare_peer_token(tables, Some(face), res, peer); + undeclare_peer_token(tables, Some(face), res, peer, send_declare); } pub(super) fn undeclare_client_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_tokens @@ -461,26 +494,29 @@ pub(super) fn undeclare_client_token( let mut client_tokens = client_tokens(res); let peer_tokens = remote_peer_tokens(tables, res); if client_tokens.is_empty() { - undeclare_peer_token(tables, None, res, &tables.zid.clone()); + undeclare_peer_token(tables, None, res, &tables.zid.clone(), send_declare); } if client_tokens.len() == 1 && !peer_tokens { let mut face = &mut client_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: 
None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_tokens @@ -496,19 +532,22 @@ pub(super) fn undeclare_client_token( }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -521,16 +560,21 @@ fn forget_client_token( tables: &mut Tables, face: &mut Arc, id: TokenId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res); + undeclare_client_token(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto) { +pub(super) fn token_remove_node( + tables: &mut Tables, + node: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { for mut res in hat!(tables) .peer_tokens .iter() @@ -538,12 +582,12 @@ pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto) { .cloned() .collect::>>() { - unregister_peer_token(tables, &mut res, node); + unregister_peer_token(tables, &mut res, node, send_declare); Resource::clean(&mut res) } } -pub(super) fn token_tree_change(tables: &mut Tables, new_childs: &[Vec]) { +pub(super) fn token_tree_change(tables: &mut 
Tables, new_clildren: &[Vec]) { let net = match hat!(tables).peers_net.as_ref() { Some(net) => net, None => { @@ -551,9 +595,9 @@ pub(super) fn token_tree_change(tables: &mut Tables, new_childs: &[Vec>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -605,16 +650,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); } } else { for token in &hat!(tables).peer_tokens { @@ -630,16 +678,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); } } } @@ -656,16 +707,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: 
ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); } } } @@ -681,13 +735,14 @@ impl HatTokenTrait for HatCode { res: &mut Arc, node_id: NodeId, _interest_id: Option, + send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_token(tables, face, res, peer) + declare_peer_token(tables, face, res, peer, send_declare) } } else { - declare_client_token(tables, face, id, res) + declare_client_token(tables, face, id, res, send_declare) } } @@ -698,11 +753,12 @@ impl HatTokenTrait for HatCode { id: TokenId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { if face.whatami != WhatAmI::Client { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_token(tables, face, &mut res, &peer); + forget_peer_token(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -711,7 +767,7 @@ impl HatTokenTrait for HatCode { None } } else { - forget_client_token(tables, face, id) + forget_client_token(tables, face, id, send_declare) } } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index f2175474d4..649e41f4c5 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -28,7 +28,7 @@ use zenoh_protocol::{ SubscriberId, TokenId, }, interest::{InterestId, InterestMode, InterestOptions}, - Oam, + Declare, Oam, }, }; use zenoh_result::ZResult; @@ -42,6 +42,7 @@ use super::{ tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, 
TablesLock}, }, router::RoutesIndexes, + RoutingContext, }; use crate::net::runtime::Runtime; @@ -71,6 +72,8 @@ impl Sources { } } +pub(crate) type SendDeclare<'a> = dyn FnMut(&Arc, RoutingContext) + + 'a; pub(crate) trait HatTrait: HatBaseTrait + HatInterestTrait + HatPubSubTrait + HatQueriesTrait + HatTokenTrait { @@ -90,6 +93,7 @@ pub(crate) trait HatBaseTrait { tables: &mut Tables, tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()>; fn new_transport_unicast_face( @@ -98,6 +102,7 @@ pub(crate) trait HatBaseTrait { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()>; fn handle_oam( @@ -106,6 +111,7 @@ pub(crate) trait HatBaseTrait { tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()>; fn map_routing_context( @@ -132,9 +138,15 @@ pub(crate) trait HatBaseTrait { tables: &mut Tables, tables_ref: &Arc, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()>; - fn close_face(&self, tables: &TablesLock, face: &mut Arc); + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ); } pub(crate) trait HatInterestTrait { @@ -148,11 +160,13 @@ pub(crate) trait HatInterestTrait { res: Option<&mut Arc>, mode: InterestMode, options: InterestOptions, + send_declare: &mut SendDeclare, ); fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: InterestId); } pub(crate) trait HatPubSubTrait { + #[allow(clippy::too_many_arguments)] fn declare_subscription( &self, tables: &mut Tables, @@ -161,6 +175,7 @@ pub(crate) trait HatPubSubTrait { res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ); fn undeclare_subscription( &self, @@ -169,6 +184,7 @@ pub(crate) trait HatPubSubTrait { id: SubscriberId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option>; fn get_subscriptions(&self, 
tables: &Tables) -> Vec<(Arc, Sources)>; @@ -192,6 +208,7 @@ pub(crate) trait HatPubSubTrait { } pub(crate) trait HatQueriesTrait { + #[allow(clippy::too_many_arguments)] fn declare_queryable( &self, tables: &mut Tables, @@ -200,6 +217,7 @@ pub(crate) trait HatQueriesTrait { res: &mut Arc, qabl_info: &QueryableInfoType, node_id: NodeId, + send_declare: &mut SendDeclare, ); fn undeclare_queryable( &self, @@ -208,6 +226,7 @@ pub(crate) trait HatQueriesTrait { id: QueryableId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option>; fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)>; @@ -238,6 +257,7 @@ pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box, node_id: NodeId, interest_id: Option, + send_declare: &mut SendDeclare, ); fn undeclare_token( @@ -255,6 +276,7 @@ pub trait HatTokenTrait { id: TokenId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option>; } diff --git a/zenoh/src/net/routing/hat/p2p_peer/interests.rs b/zenoh/src/net/routing/hat/p2p_peer/interests.rs index 4fe8936cc7..068cdd0eeb 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/interests.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/interests.rs @@ -34,7 +34,7 @@ use crate::net::routing::{ resource::Resource, tables::{Tables, TablesLock}, }, - hat::{CurrentFutureTrait, HatInterestTrait}, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, RoutingContext, }; @@ -86,6 +86,7 @@ impl HatInterestTrait for HatCode { res: Option<&mut Arc>, mode: InterestMode, options: InterestOptions, + send_declare: &mut SendDeclare, ) { if options.subscribers() { declare_sub_interest( @@ -95,6 +96,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if options.queryables() { @@ -105,6 +107,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if options.tokens() { @@ -115,6 +118,7 @@ impl 
HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } face_hat_mut!(face) @@ -170,17 +174,16 @@ impl HatInterestTrait for HatCode { interest.src_face, interest.src_interest_id ); - interest - .src_face - .primitives - .clone() - .send_declare(RoutingContext::new(Declare { + send_declare( + &interest.src_face.primitives, + RoutingContext::new(Declare { interest_id: Some(interest.src_interest_id), ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareFinal(DeclareFinal), - })); + }), + ); } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index eab2f393de..8dbbcec936 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -53,7 +53,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use crate::net::{ codec::Zenoh080Routing, @@ -148,11 +148,12 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, face: &mut Face, + send_declare: &mut SendDeclare, ) -> ZResult<()> { interests_new_face(tables, &mut face.state); - pubsub_new_face(tables, &mut face.state); - queries_new_face(tables, &mut face.state); - token_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -162,6 +163,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { if face.state.whatami != WhatAmI::Client { if let Some(net) = hat_mut!(tables).gossip.as_mut() { @@ -180,25 +182,31 @@ impl HatBaseTrait for HatCode { } interests_new_face(tables, &mut face.state); - pubsub_new_face(tables, 
&mut face.state); - queries_new_face(tables, &mut face.state); - token_new_face(tables, &mut face.state); + pubsub_new_face(tables, &mut face.state, send_declare); + queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); if face.state.whatami == WhatAmI::Peer { - face.state - .primitives - .send_declare(RoutingContext::new(Declare { + send_declare( + &face.state.primitives, + RoutingContext::new(Declare { interest_id: Some(0), ext_qos: QoSType::default(), ext_tstamp: None, ext_nodeid: NodeIdType::default(), body: DeclareBody::DeclareFinal(DeclareFinal), - })); + }), + ); } Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -229,7 +237,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -251,7 +259,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -272,7 +280,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut 
res); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); @@ -311,6 +319,7 @@ impl HatBaseTrait for HatCode { _tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { if oam.id == OAM_LINKSTATE { if let ZExtBody::ZBuf(buf) = oam.body { @@ -349,6 +358,7 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, _tables_ref: &Arc, transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index bc0a6f7de2..dca20b1d94 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -38,7 +38,7 @@ use crate::{ resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + hat::{CurrentFutureTrait, HatPubSubTrait, SendDeclare, Sources}, router::{update_data_routes_from, RoutesIndexes}, RoutingContext, }, @@ -51,6 +51,7 @@ fn propagate_simple_subscription_to( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id) && !face_hat!(dst_face).local_subs.contains_key(res) @@ -60,20 +61,23 @@ fn propagate_simple_subscription_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + 
RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } else { let matching_interests = face_hat!(dst_face) .remote_interests @@ -94,20 +98,23 @@ fn propagate_simple_subscription_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } } @@ -119,6 +126,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { for mut dst_face in tables .faces @@ -126,7 +134,14 @@ fn propagate_simple_subscription( .cloned() .collect::>>() { - propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + send_declare, + ); } } @@ -164,10 +179,11 @@ fn declare_client_subscription( id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, id, res, sub_info); - 
propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); // This introduced a buffer overflow on windows // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -212,22 +228,29 @@ fn remote_client_subs(res: &Arc, face: &Arc) -> bool { .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_subs @@ -240,19 +263,22 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, &face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + 
&face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -263,6 +289,7 @@ pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -271,25 +298,28 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } if client_subs.len() == 1 { let mut face = &mut client_subs[0]; if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_subs @@ -302,19 +332,22 @@ pub(super) fn undeclare_client_subscription( .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: 
None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -326,16 +359,21 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); + undeclare_client_subscription(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { if face.whatami != WhatAmI::Client { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers @@ -353,13 +391,14 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { sub, &sub_info, &mut src_face.clone(), + send_declare, ); } } } // recompute routes // TODO: disable data routes and recompute them in parallel to avoid holding - // tables write lock for a long time on peer conenction. + // tables write lock for a long time on peer connection. 
update_data_routes_from(tables, &mut tables.root_res.clone()); } @@ -370,6 +409,7 @@ pub(super) fn declare_sub_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -393,20 +433,23 @@ pub(super) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); } } else { for src_face in tables @@ -426,20 +469,25 @@ pub(super) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber( + DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }, + ), + }, + sub.expr(), + ), + ); } } } @@ -462,20 +510,23 @@ pub(super) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(sub, face); - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } @@ -492,8 +543,9 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, id, res, sub_info); + declare_client_subscription(tables, face, id, res, sub_info, send_declare); } fn undeclare_subscription( @@ -503,8 +555,9 @@ impl HatPubSubTrait for HatCode { id: SubscriberId, _res: Option>, _node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { - forget_client_subscription(tables, face, id) + forget_client_subscription(tables, face, id, send_declare) } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 74b676ca80..9b0e0e490f 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -43,7 +43,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, SendDeclare, Sources}, router::{update_query_routes_from, RoutesIndexes}, RoutingContext, }; @@ -85,6 +85,7 @@ fn propagate_simple_queryable_to( dst_face: &mut Arc, res: &Arc, src_face: &Option<&mut 
Arc>, + send_declare: &mut SendDeclare, ) { let info = local_qabl_info(tables, res, dst_face); let current = face_hat!(dst_face).local_qabls.get(res); @@ -112,20 +113,23 @@ fn propagate_simple_queryable_to( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } @@ -133,6 +137,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let faces = tables .faces @@ -140,7 +145,7 @@ fn propagate_simple_queryable( .cloned() .collect::>>(); for mut dst_face in faces { - propagate_simple_queryable_to(tables, &mut dst_face, res, &src_face); + propagate_simple_queryable_to(tables, &mut dst_face, res, &src_face, send_declare); } } @@ -170,9 +175,10 @@ fn declare_client_queryable( id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfoType, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, id, res, qabl_info); - propagate_simple_queryable(tables, res, Some(face)); + propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] @@ -196,22 +202,29 @@ fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn 
propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for face in tables.faces.values_mut() { if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_qabls @@ -224,19 +237,22 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_qabls @@ -259,26 +276,29 @@ pub(super) fn undeclare_client_queryable( let mut client_qabls = client_qabls(res); if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } else { - propagate_simple_queryable(tables, res, None); + propagate_simple_queryable(tables, res, None, send_declare); } if client_qabls.len() == 1 { let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( 
+ Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_qabls @@ -291,19 +311,22 @@ pub(super) fn undeclare_client_queryable( .is_some_and(|m| m.context.is_some() && (remote_client_qabls(&m, face))) }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -315,16 +338,21 @@ fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); + undeclare_client_queryable(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn queries_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { if face.whatami != WhatAmI::Client { for src_face in tables .faces @@ -333,13 +361,19 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { .collect::>>() { for qabl in face_hat!(src_face).remote_qabls.values() { - 
propagate_simple_queryable_to(tables, face, qabl, &Some(&mut src_face.clone())); + propagate_simple_queryable_to( + tables, + face, + qabl, + &Some(&mut src_face.clone()), + send_declare, + ); } } } // recompute routes // TODO: disable query routes and recompute them in parallel to avoid holding - // tables write lock for a long time on peer conenction. + // tables write lock for a long time on peer connection. update_query_routes_from(tables, &mut tables.root_res.clone()); } @@ -354,6 +388,7 @@ pub(super) fn declare_qabl_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -377,20 +412,23 @@ pub(super) fn declare_qabl_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } else { for src_face in tables @@ -413,20 +451,23 @@ pub(super) fn declare_qabl_interest( 0 }; let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + 
Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } @@ -453,20 +494,23 @@ pub(super) fn declare_qabl_interest( 0 }; let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } @@ -484,8 +528,9 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfoType, _node_id: NodeId, + send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, id, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info, send_declare); } fn undeclare_queryable( @@ -495,8 +540,9 @@ impl HatQueriesTrait for HatCode { id: QueryableId, _res: Option>, _node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { - forget_client_queryable(tables, face, id) + forget_client_queryable(tables, face, id, send_declare) } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs index 65c351c812..be36a4eb15 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/token.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -26,7 +26,7 @@ use zenoh_sync::get_mut_unchecked; use super::{face_hat, face_hat_mut, 
HatCode, HatFace}; use crate::net::routing::{ dispatcher::{face::FaceState, tables::Tables}, - hat::{CurrentFutureTrait, HatTokenTrait}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, router::{NodeId, Resource, SessionContext}, RoutingContext, }; @@ -37,6 +37,7 @@ fn propagate_simple_token_to( dst_face: &mut Arc, res: &Arc, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) && !face_hat!(dst_face).local_tokens.contains_key(res) @@ -46,19 +47,22 @@ fn propagate_simple_token_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); } else { let matching_interests = face_hat!(dst_face) .remote_interests @@ -77,33 +81,41 @@ fn propagate_simple_token_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, 
+ RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); } } } } } -fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { for mut dst_face in tables .faces .values() .cloned() .collect::>>() { - propagate_simple_token_to(tables, &mut dst_face, res, src_face); + propagate_simple_token_to(tables, &mut dst_face, res, src_face, send_declare); } } @@ -139,10 +151,11 @@ fn declare_client_token( face: &mut Arc, id: TokenId, res: &mut Arc, + send_declare: &mut SendDeclare, ) { register_client_token(tables, face, id, res); - propagate_simple_token(tables, res, face); + propagate_simple_token(tables, res, face, send_declare); } #[inline] @@ -166,42 +179,52 @@ fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { .any(|ctx| ctx.face.id != face.id && ctx.token) } -fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + 
ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } else if face_hat!(face).remote_interests.values().any(|(r, o)| { o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() }) { // Token has never been declared on this face. // Send an Undeclare with a one shot generated id and a WireExpr ext. - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), - ext_wire_expr: WireExprType { - wire_expr: Resource::get_best_key(res, "", face.id), - }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_tokens @@ -214,19 +237,22 @@ fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { .is_some_and(|m| m.context.is_some() && remote_client_tokens(&m, &face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: 
DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } else if face_hat!(face).remote_interests.values().any(|(r, o)| { o.tokens() && r.as_ref().map(|r| r.matches(&res)).unwrap_or(true) @@ -234,21 +260,24 @@ fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { }) { // Token has never been declared on this face. // Send an Undeclare with a one shot generated id and a WireExpr ext. - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), - ext_wire_expr: WireExprType { - wire_expr: Resource::get_best_key(&res, "", face.id), - }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(&res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); } } } @@ -259,6 +288,7 @@ pub(super) fn undeclare_client_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_tokens @@ -271,26 +301,29 @@ pub(super) fn undeclare_client_token( let mut client_tokens = client_tokens(res); if client_tokens.is_empty() { - propagate_forget_simple_token(tables, res); + propagate_forget_simple_token(tables, res, send_declare); } if client_tokens.len() == 1 { let mut face = &mut client_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - 
Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_tokens @@ -303,19 +336,22 @@ pub(super) fn undeclare_client_token( .is_some_and(|m| m.context.is_some() && remote_client_tokens(&m, face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -329,19 +365,24 @@ fn forget_client_token( face: &mut Arc, id: TokenId, res: Option>, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res); + undeclare_client_token(tables, face, &mut res, send_declare); Some(res) } else if let Some(mut res) = res { - undeclare_client_token(tables, face, &mut res); + undeclare_client_token(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) 
fn token_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn token_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { if face.whatami != WhatAmI::Client { for mut src_face in tables .faces @@ -350,7 +391,7 @@ pub(super) fn token_new_face(tables: &mut Tables, face: &mut Arc) { .collect::>>() { for token in face_hat!(src_face.clone()).remote_tokens.values() { - propagate_simple_token_to(tables, face, token, &mut src_face); + propagate_simple_token_to(tables, face, token, &mut src_face, send_declare); } } } @@ -363,6 +404,7 @@ pub(crate) fn declare_token_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -382,16 +424,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); } } else { for src_face in tables @@ -410,16 +455,22 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, 
+ ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr, + }), + }, + token.expr(), + ), + ); } } } @@ -440,16 +491,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); } } } @@ -465,8 +519,9 @@ impl HatTokenTrait for HatCode { res: &mut Arc, _node_id: NodeId, _interest_id: Option, + send_declare: &mut SendDeclare, ) { - declare_client_token(tables, face, id, res) + declare_client_token(tables, face, id, res, send_declare) } fn undeclare_token( @@ -476,7 +531,8 @@ impl HatTokenTrait for HatCode { id: TokenId, res: Option>, _node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { - forget_client_token(tables, face, id, res) + forget_client_token(tables, face, id, res, send_declare) } } diff --git a/zenoh/src/net/routing/hat/router/interests.rs b/zenoh/src/net/routing/hat/router/interests.rs index f9f289bfa7..fcd0269fcc 100644 --- a/zenoh/src/net/routing/hat/router/interests.rs +++ b/zenoh/src/net/routing/hat/router/interests.rs @@ -33,7 +33,7 @@ use crate::net::routing::{ resource::Resource, tables::{Tables, TablesLock}, }, - hat::{CurrentFutureTrait, HatInterestTrait}, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, RoutingContext, }; @@ -47,6 +47,7 @@ impl HatInterestTrait for HatCode { res: Option<&mut Arc>, mode: InterestMode, mut options: 
InterestOptions, + send_declare: &mut SendDeclare, ) { if options.aggregate() && face.whatami == WhatAmI::Peer { tracing::warn!( @@ -63,6 +64,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if options.queryables() { @@ -73,6 +75,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if options.tokens() { @@ -83,6 +86,7 @@ impl HatInterestTrait for HatCode { res.as_ref().map(|r| (*r).clone()).as_mut(), mode, options.aggregate(), + send_declare, ) } if mode.future() { @@ -91,13 +95,16 @@ impl HatInterestTrait for HatCode { .insert(id, (res.cloned(), options)); } if mode.current() { - face.primitives.send_declare(RoutingContext::new(Declare { - interest_id: Some(id), - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareFinal(DeclareFinal), - })); + send_declare( + &face.primitives, + RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 94352ea77d..a48e06987a 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -52,7 +52,7 @@ use super::{ face::FaceState, tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, - HatBaseTrait, HatTrait, + HatBaseTrait, HatTrait, SendDeclare, }; use crate::net::{ codec::Zenoh080Routing, @@ -272,7 +272,7 @@ impl HatTables { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); - let new_childs = match net_type { + let new_children = match net_type { WhatAmI::Router => hat_mut!(tables) .routers_net .as_mut() @@ -282,9 +282,9 @@ impl HatTables { }; tracing::trace!("Compute 
routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, &new_childs, net_type); - token::token_tree_change(&mut tables, &new_childs, net_type); + pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); + queries::queries_tree_change(&mut tables, &new_children, net_type); + token::token_tree_change(&mut tables, &new_children, net_type); tracing::trace!("Computations completed"); match net_type { @@ -372,6 +372,7 @@ impl HatBaseTrait for HatCode { _tables: &mut Tables, _tables_ref: &Arc, _face: &mut Face, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { // Nothing to do Ok(()) @@ -383,6 +384,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { let link_id = match face.state.whatami { WhatAmI::Router => hat_mut!(tables) @@ -423,7 +425,12 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + fn close_face( + &self, + tables: &TablesLock, + face: &mut Arc, + send_declare: &mut SendDeclare, + ) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); @@ -454,7 +461,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -476,7 +483,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + undeclare_client_queryable(&mut wtables, &mut 
face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -497,7 +504,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut res); + undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); @@ -536,6 +543,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { if oam.id == OAM_LINKSTATE { if let ZExtBody::ZBuf(buf) = oam.body { @@ -556,9 +564,24 @@ impl HatBaseTrait for HatCode { .link_states(list.link_states, zid) .removed_nodes { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - token_remove_node(tables, &removed_node.zid, WhatAmI::Router); + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); } if hat!(tables).full_net(WhatAmI::Peer) { @@ -580,13 +603,20 @@ impl HatBaseTrait for HatCode { tables, &removed_node.zid, WhatAmI::Peer, + send_declare, ); queries_remove_node( tables, &removed_node.zid, WhatAmI::Peer, + send_declare, + ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, ); - token_remove_node(tables, &removed_node.zid, WhatAmI::Peer); } hat_mut!(tables).shared_nodes = shared_nodes( @@ -602,16 +632,19 @@ impl HatBaseTrait for HatCode { tables, &updated_node.zid, &updated_node.links, + send_declare, ); queries_linkstate_change( tables, &updated_node.zid, &updated_node.links, + send_declare, ); token_linkstate_change( tables, &updated_node.zid, &updated_node.links, + send_declare, ); } } 
@@ -659,6 +692,7 @@ impl HatBaseTrait for HatCode { tables: &mut Tables, tables_ref: &Arc, transport: &TransportUnicast, + send_declare: &mut SendDeclare, ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { @@ -670,9 +704,24 @@ impl HatBaseTrait for HatCode { .unwrap() .remove_link(&zid) { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - token_remove_node(tables, &removed_node.zid, WhatAmI::Router); + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); } if hat!(tables).full_net(WhatAmI::Peer) { @@ -693,9 +742,24 @@ impl HatBaseTrait for HatCode { .unwrap() .remove_link(&zid) { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - token_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); } hat_mut!(tables).shared_nodes = shared_nodes( diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 151957d19a..3bfdde49d1 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -109,7 +109,7 @@ pub(super) struct Changes { #[derive(Clone)] pub(super) struct Tree { pub(super) parent: Option, - pub(super) childs: Vec, + pub(super) children: Vec, pub(super) directions: Vec>, } @@ -160,7 +160,7 @@ impl Network { links: VecMap::new(), trees: vec![Tree { parent: None, - childs: 
vec![], + children: vec![], directions: vec![None], }], distances: vec![0.0], @@ -906,12 +906,13 @@ impl Network { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + let old_children: Vec> = + self.trees.iter().map(|t| t.children.clone()).collect(); self.trees.clear(); self.trees.resize_with(max_idx.index() + 1, || Tree { parent: None, - childs: vec![], + children: vec![], directions: vec![], }); @@ -945,7 +946,7 @@ impl Network { for idx in &indexes { if let Some(parent_idx) = paths.predecessors[idx.index()] { if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); + self.trees[tree_root_idx.index()].children.push(*idx); } } } @@ -983,23 +984,23 @@ impl Network { } } - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); + let mut new_children = Vec::with_capacity(self.trees.len()); + new_children.resize(self.trees.len(), vec![]); - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { + for i in 0..new_children.len() { + new_children[i] = if i < old_children.len() { self.trees[i] - .childs + .children .iter() - .filter(|idx| !old_childs[i].contains(idx)) + .filter(|idx| !old_children[i].contains(idx)) .cloned() .collect() } else { - self.trees[i].childs.clone() + self.trees[i].children.clone() }; } - new_childs + new_children } #[inline] diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 5c1ced2405..948c9eabb7 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -43,22 +43,22 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{Route, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + hat::{CurrentFutureTrait, HatPubSubTrait, SendDeclare, Sources}, 
router::RoutesIndexes, RoutingContext, }; #[inline] -fn send_sourced_subscription_to_net_childs( +fn send_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -100,6 +100,7 @@ fn propagate_simple_subscription_to( sub_info: &SubscriberInfo, src_face: &mut Arc, full_peer_net: bool, + send_declare: &mut SendDeclare, ) { if src_face.id != dst_face.id && !face_hat!(dst_face).local_subs.contains_key(res) @@ -129,20 +130,23 @@ fn propagate_simple_subscription_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); } } } @@ -153,6 +157,7 @@ fn propagate_simple_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, + send_declare: &mut SendDeclare, ) { let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables @@ -168,6 +173,7 @@ fn propagate_simple_subscription( sub_info, src_face, full_peer_net, + send_declare, ); } } @@ 
-184,10 +190,10 @@ fn propagate_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, sub_info, @@ -216,6 +222,7 @@ fn register_router_subscription( res: &mut Arc, sub_info: &SubscriberInfo, router: ZenohIdProto, + send_declare: &mut SendDeclare, ) { if !res_hat!(res).router_subs.contains(&router) { // Register router subscription @@ -233,7 +240,7 @@ fn register_router_subscription( } // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); } fn declare_router_subscription( @@ -242,8 +249,9 @@ fn declare_router_subscription( res: &mut Arc, sub_info: &SubscriberInfo, router: ZenohIdProto, + send_declare: &mut SendDeclare, ) { - register_router_subscription(tables, face, res, sub_info, router); + register_router_subscription(tables, face, res, sub_info, router, send_declare); } fn register_peer_subscription( @@ -271,11 +279,12 @@ fn declare_peer_subscription( res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { register_peer_subscription(tables, face, res, sub_info, peer); let propa_sub_info = *sub_info; let zid = tables.zid; - register_router_subscription(tables, face, res, &propa_sub_info, zid); + register_router_subscription(tables, face, res, &propa_sub_info, zid, send_declare); } fn register_client_subscription( @@ -312,10 +321,11 @@ fn declare_client_subscription( id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, + send_declare: &mut SendDeclare, ) { register_client_subscription(tables, face, id, res, sub_info); let zid = tables.zid; - register_router_subscription(tables, face, res, sub_info, zid); + register_router_subscription(tables, face, 
res, sub_info, zid, send_declare); } #[inline] @@ -358,15 +368,15 @@ fn remote_client_subs(res: &Arc, face: &Arc) -> bool { } #[inline] -fn send_forget_sourced_subscription_to_net_childs( +fn send_forget_sourced_subscription_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -399,22 +409,29 @@ fn send_forget_sourced_subscription_to_net_childs( } } -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(&mut face) .local_subs @@ -431,26 +448,33 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc }) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } } } -fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_subscription_to_peers( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { if !hat!(tables).full_net(WhatAmI::Peer) && res_hat!(res).router_subs.len() == 1 && res_hat!(res).router_subs.contains(&tables.zid) @@ -472,19 +496,22 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -502,10 +529,10 @@ fn propagate_forget_sourced_subscription( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( + send_forget_sourced_subscription_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, 
+ &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -531,6 +558,7 @@ fn unregister_router_subscription( tables: &mut Tables, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { res_hat_mut!(res).router_subs.retain(|sub| sub != router); @@ -542,10 +570,10 @@ fn unregister_router_subscription( if hat_mut!(tables).full_net(WhatAmI::Peer) { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } - propagate_forget_simple_subscription(tables, res); + propagate_forget_simple_subscription(tables, res, send_declare); } - propagate_forget_simple_subscription_to_peers(tables, res); + propagate_forget_simple_subscription_to_peers(tables, res, send_declare); } fn undeclare_router_subscription( @@ -553,9 +581,10 @@ fn undeclare_router_subscription( face: Option<&Arc>, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { if res_hat!(res).router_subs.contains(router) { - unregister_router_subscription(tables, res, router); + unregister_router_subscription(tables, res, router, send_declare); propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); } } @@ -565,8 +594,9 @@ fn forget_router_subscription( face: &mut Arc, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { - undeclare_router_subscription(tables, Some(face), res, router); + undeclare_router_subscription(tables, Some(face), res, router, send_declare); } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { @@ -596,13 +626,14 @@ fn forget_peer_subscription( face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { undeclare_peer_subscription(tables, Some(face), res, peer); let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); let peer_subs = remote_peer_subs(tables, res); let zid = tables.zid; if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, res, &zid); + 
undeclare_router_subscription(tables, None, res, &zid, send_declare); } } @@ -610,6 +641,7 @@ pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { @@ -620,27 +652,30 @@ pub(super) fn undeclare_client_subscription( let router_subs = remote_router_subs(tables, res); let peer_subs = remote_peer_subs(tables, res); if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + undeclare_router_subscription(tables, None, res, &tables.zid.clone(), send_declare); } else { - propagate_forget_simple_subscription_to_peers(tables, res); + propagate_forget_simple_subscription_to_peers(tables, res, send_declare); } if client_subs.len() == 1 && !router_subs && !peer_subs { let mut face = &mut client_subs[0]; if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_subs @@ -657,19 +692,22 @@ pub(super) fn undeclare_client_subscription( }) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: 
ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -681,16 +719,22 @@ fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); + undeclare_client_subscription(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_type: WhatAmI) { +pub(super) fn pubsub_remove_node( + tables: &mut Tables, + node: &ZenohIdProto, + net_type: WhatAmI, + send_declare: &mut SendDeclare, +) { match net_type { WhatAmI::Router => { for mut res in hat!(tables) @@ -700,7 +744,7 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_t .cloned() .collect::>>() { - unregister_router_subscription(tables, &mut res, node); + unregister_router_subscription(tables, &mut res, node, send_declare); update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) @@ -718,7 +762,13 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_t let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); let peer_subs = remote_peer_subs(tables, &res); if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); + undeclare_router_subscription( + tables, + None, + &mut res, + &tables.zid.clone(), 
+ send_declare, + ); } update_matches_data_routes(tables, &mut res); @@ -731,7 +781,7 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_t pub(super) fn pubsub_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_children: &[Vec], net_type: WhatAmI, ) { let net = match hat!(tables).get_net(net_type) { @@ -741,9 +791,9 @@ pub(super) fn pubsub_tree_change( return; } }; - // propagate subs to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate subs to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; @@ -763,10 +813,10 @@ pub(super) fn pubsub_tree_change( let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - send_sourced_subscription_to_net_childs( + send_sourced_subscription_to_net_children( tables, net, - tree_childs, + tree_children, res, None, &sub_info, @@ -787,6 +837,7 @@ pub(super) fn pubsub_linkstate_change( tables: &mut Tables, zid: &ZenohIdProto, links: &[ZenohIdProto], + send_declare: &mut SendDeclare, ) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { @@ -820,21 +871,24 @@ pub(super) fn pubsub_linkstate_change( }) }; if forget { - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }, - ), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: 
ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }, + ), + }, + res.expr(), + ), + ); face_hat_mut!(dst_face).local_subs.remove(res); } @@ -846,20 +900,25 @@ pub(super) fn pubsub_linkstate_change( let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber( + DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: sub_info, + }, + ), + }, + res.expr(), + ), + ); } } } @@ -876,6 +935,7 @@ pub(crate) fn declare_sub_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() { let interest_id = (!mode.future()).then_some(id); @@ -899,20 +959,23 @@ pub(crate) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: 
ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); } } else { for sub in &hat!(tables).router_subs { @@ -938,20 +1001,23 @@ pub(crate) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } @@ -975,20 +1041,23 @@ pub(crate) fn declare_sub_interest( 0 }; let wire_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } @@ -1004,23 +1073,24 @@ impl HatPubSubTrait for HatCode { res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - declare_router_subscription(tables, 
face, res, sub_info, router) + declare_router_subscription(tables, face, res, sub_info, router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer) + declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) } } else { - declare_client_subscription(tables, face, id, res, sub_info) + declare_client_subscription(tables, face, id, res, sub_info, send_declare) } } - _ => declare_client_subscription(tables, face, id, res, sub_info), + _ => declare_client_subscription(tables, face, id, res, sub_info, send_declare), } } @@ -1031,12 +1101,13 @@ impl HatPubSubTrait for HatCode { id: SubscriberId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { match face.whatami { WhatAmI::Router => { if let Some(mut res) = res { if let Some(router) = get_router(tables, face, node_id) { - forget_router_subscription(tables, face, &mut res, &router); + forget_router_subscription(tables, face, &mut res, &router, send_declare); Some(res) } else { None @@ -1049,7 +1120,7 @@ impl HatPubSubTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, &mut res, &peer); + forget_peer_subscription(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -1058,10 +1129,10 @@ impl HatPubSubTrait for HatCode { None } } else { - forget_client_subscription(tables, face, id) + forget_client_subscription(tables, face, id, send_declare) } } - _ => forget_client_subscription(tables, face, id), + _ => forget_client_subscription(tables, face, id, send_declare), } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 5a89757c46..93eceaa8f3 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ 
b/zenoh/src/net/routing/hat/router/queries.rs @@ -48,7 +48,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, SendDeclare, Sources}, router::RoutesIndexes, RoutingContext, }; @@ -188,16 +188,16 @@ fn local_qabl_info( } #[inline] -fn send_sourced_queryable_to_net_childs( +fn send_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -236,6 +236,7 @@ fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, + send_declare: &mut SendDeclare, ) { let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); @@ -272,20 +273,23 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -302,10 +306,10 @@ fn propagate_sourced_queryable( match 
net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, qabl_info, src_face, @@ -334,6 +338,7 @@ fn register_router_queryable( res: &mut Arc, qabl_info: &QueryableInfoType, router: ZenohIdProto, + send_declare: &mut SendDeclare, ) { let current_info = res_hat!(res).router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { @@ -363,7 +368,7 @@ fn register_router_queryable( } // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); + propagate_simple_queryable(tables, res, face, send_declare); } fn declare_router_queryable( @@ -372,8 +377,9 @@ fn declare_router_queryable( res: &mut Arc, qabl_info: &QueryableInfoType, router: ZenohIdProto, + send_declare: &mut SendDeclare, ) { - register_router_queryable(tables, Some(face), res, qabl_info, router); + register_router_queryable(tables, Some(face), res, qabl_info, router, send_declare); } fn register_peer_queryable( @@ -402,12 +408,13 @@ fn declare_peer_queryable( res: &mut Arc, qabl_info: &QueryableInfoType, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { let mut face = Some(face); register_peer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); let local_info = local_router_qabl_info(tables, res); let zid = tables.zid; - register_router_queryable(tables, face, res, &local_info, zid); + register_router_queryable(tables, face, res, &local_info, zid, send_declare); } fn register_client_queryable( @@ -436,11 +443,12 @@ fn declare_client_queryable( id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfoType, + send_declare: &mut SendDeclare, ) { register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; - register_router_queryable(tables, Some(face), res, 
&local_details, zid); + register_router_queryable(tables, Some(face), res, &local_details, zid, send_declare); } #[inline] @@ -483,15 +491,15 @@ fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { } #[inline] -fn send_forget_sourced_queryable_to_net_childs( +fn send_forget_sourced_queryable_to_net_children( tables: &Tables, net: &Network, - childs: &[NodeIndex], + children: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { - for child in childs { + for child in children { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -524,22 +532,29 @@ fn send_forget_sourced_queryable_to_net_childs( } } -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(&mut face) .local_qabls @@ -556,26 +571,33 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { +fn propagate_forget_simple_queryable_to_peers( + tables: &mut Tables, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { if !hat!(tables).full_net(WhatAmI::Peer) && 
res_hat!(res).router_qabls.len() == 1 && res_hat!(res).router_qabls.contains_key(&tables.zid) @@ -597,19 +619,22 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -627,10 +652,10 @@ fn propagate_forget_sourced_queryable( match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( + send_forget_sourced_queryable_to_net_children( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -656,6 +681,7 @@ fn unregister_router_queryable( tables: &mut Tables, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { res_hat_mut!(res).router_qabls.remove(router); @@ -667,10 +693,10 @@ fn unregister_router_queryable( if hat!(tables).full_net(WhatAmI::Peer) { undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); } - propagate_forget_simple_queryable(tables, res); + propagate_forget_simple_queryable(tables, res, send_declare); } - propagate_forget_simple_queryable_to_peers(tables, res); + propagate_forget_simple_queryable_to_peers(tables, res, send_declare); } fn undeclare_router_queryable( @@ -678,9 +704,10 @@ fn 
undeclare_router_queryable( face: Option<&Arc>, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { if res_hat!(res).router_qabls.contains_key(router) { - unregister_router_queryable(tables, res, router); + unregister_router_queryable(tables, res, router, send_declare); propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); } } @@ -690,8 +717,9 @@ fn forget_router_queryable( face: &mut Arc, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { - undeclare_router_queryable(tables, Some(face), res, router); + undeclare_router_queryable(tables, Some(face), res, router, send_declare); } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { @@ -721,6 +749,7 @@ fn forget_peer_queryable( face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { undeclare_peer_queryable(tables, Some(face), res, peer); @@ -728,10 +757,10 @@ fn forget_peer_queryable( let peer_qabls = remote_peer_qabls(tables, res); let zid = tables.zid; if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, res, &zid); + undeclare_router_queryable(tables, None, res, &zid, send_declare); } else { let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, zid); + register_router_queryable(tables, None, res, &local_info, zid, send_declare); } } @@ -739,6 +768,7 @@ pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_qabls @@ -754,29 +784,32 @@ pub(super) fn undeclare_client_queryable( let peer_qabls = remote_peer_qabls(tables, res); if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + undeclare_router_queryable(tables, None, res, &tables.zid.clone(), send_declare); } else { let local_info = local_router_qabl_info(tables, res); - 
register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid, send_declare); + propagate_forget_simple_queryable_to_peers(tables, res, send_declare); } if client_qabls.len() == 1 && !router_qabls && !peer_qabls { let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_qabls @@ -793,19 +826,22 @@ pub(super) fn undeclare_client_queryable( }) }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } } } @@ -817,16 +853,22 @@ fn 
forget_client_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); + undeclare_client_queryable(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_type: WhatAmI) { +pub(super) fn queries_remove_node( + tables: &mut Tables, + node: &ZenohIdProto, + net_type: WhatAmI, + send_declare: &mut SendDeclare, +) { match net_type { WhatAmI::Router => { let mut qabls = vec![]; @@ -838,7 +880,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_ } } for mut res in qabls { - unregister_router_queryable(tables, &mut res, node); + unregister_router_queryable(tables, &mut res, node, send_declare); update_matches_query_routes(tables, &res); Resource::clean(&mut res); @@ -859,10 +901,23 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_ let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); let peer_qabls = remote_peer_qabls(tables, &res); if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); + undeclare_router_queryable( + tables, + None, + &mut res, + &tables.zid.clone(), + send_declare, + ); } else { let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); + register_router_queryable( + tables, + None, + &mut res, + &local_info, + tables.zid, + send_declare, + ); } update_matches_query_routes(tables, &res); @@ -877,6 +932,7 @@ pub(super) fn queries_linkstate_change( tables: &mut Tables, zid: &ZenohIdProto, links: &[ZenohIdProto], + send_declare: &mut SendDeclare, ) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && src_face.whatami == 
WhatAmI::Peer { @@ -911,21 +967,24 @@ pub(super) fn queries_linkstate_change( }) }; if forget { - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }, - ), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable( + UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }, + ), + }, + res.expr(), + ), + ); face_hat_mut!(dst_face).local_qabls.remove(res); } @@ -937,20 +996,23 @@ pub(super) fn queries_linkstate_change( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -962,7 +1024,7 @@ pub(super) fn queries_linkstate_change( pub(super) fn queries_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_children: &[Vec], net_type: WhatAmI, ) { let net = match hat!(tables).get_net(net_type) { @@ -972,9 +1034,9 @@ pub(super) fn queries_tree_change( return; } }; - // propagate qabls to new childs - for 
(tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate qabls to new children + for (tree_sid, tree_children) in new_children.iter().enumerate() { + if !tree_children.is_empty() { let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; @@ -990,10 +1052,10 @@ pub(super) fn queries_tree_change( _ => &res_hat!(res).peer_qabls, }; if let Some(qabl_info) = qabls.get(&tree_id) { - send_sourced_queryable_to_net_childs( + send_sourced_queryable_to_net_children( tables, net, - tree_childs, + tree_children, res, qabl_info, None, @@ -1062,6 +1124,7 @@ pub(crate) fn declare_qabl_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() { let interest_id = (!mode.future()).then_some(id); @@ -1093,20 +1156,23 @@ pub(crate) fn declare_qabl_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr, - ext_info: info, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } else { for qabl in hat!(tables).router_qabls.iter() { @@ -1132,20 +1198,23 @@ pub(crate) fn declare_qabl_interest( 0 }; let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: 
DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } @@ -1167,20 +1236,23 @@ pub(crate) fn declare_qabl_interest( 0 }; let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); } } } @@ -1196,23 +1268,24 @@ impl HatQueriesTrait for HatCode { res: &mut Arc, qabl_info: &QueryableInfoType, node_id: NodeId, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - declare_router_queryable(tables, face, res, qabl_info, router) + declare_router_queryable(tables, face, res, qabl_info, router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer) + declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare) } } else { - declare_client_queryable(tables, face, id, res, qabl_info) + declare_client_queryable(tables, face, id, 
res, qabl_info, send_declare) } } - _ => declare_client_queryable(tables, face, id, res, qabl_info), + _ => declare_client_queryable(tables, face, id, res, qabl_info, send_declare), } } @@ -1223,12 +1296,13 @@ impl HatQueriesTrait for HatCode { id: QueryableId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { match face.whatami { WhatAmI::Router => { if let Some(mut res) = res { if let Some(router) = get_router(tables, face, node_id) { - forget_router_queryable(tables, face, &mut res, &router); + forget_router_queryable(tables, face, &mut res, &router, send_declare); Some(res) } else { None @@ -1241,7 +1315,7 @@ impl HatQueriesTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, &mut res, &peer); + forget_peer_queryable(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -1250,10 +1324,10 @@ impl HatQueriesTrait for HatCode { None } } else { - forget_client_queryable(tables, face, id) + forget_client_queryable(tables, face, id, send_declare) } } - _ => forget_client_queryable(tables, face, id), + _ => forget_client_queryable(tables, face, id, send_declare), } } diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index 583a4dc336..c167c8df15 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -32,21 +32,21 @@ use super::{ }; use crate::net::routing::{ dispatcher::{face::FaceState, tables::Tables}, - hat::{CurrentFutureTrait, HatTokenTrait}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, router::{NodeId, Resource, SessionContext}, RoutingContext, }; #[inline] -fn send_sourced_token_to_net_childs( +fn send_sourced_token_to_net_clildren( tables: &Tables, net: &Network, - childs: &[NodeIndex], + clildren: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: NodeId, ) { 
- for child in childs { + for child in clildren { if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -86,6 +86,7 @@ fn propagate_simple_token_to( res: &Arc, src_face: &mut Arc, full_peer_net: bool, + send_declare: &mut SendDeclare, ) { if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) && !face_hat!(dst_face).local_tokens.contains_key(res) @@ -115,25 +116,33 @@ fn propagate_simple_token_to( let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); } } } } -fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &mut Arc) { +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables .faces @@ -141,7 +150,14 @@ fn propagate_simple_token(tables: &mut Tables, res: &Arc, src_face: &m .cloned() .collect::>>() { - propagate_simple_token_to(tables, &mut dst_face, res, src_face, full_peer_net); + propagate_simple_token_to( + tables, + &mut dst_face, + res, + src_face, + full_peer_net, + send_declare, + ); } } @@ -156,10 +172,10 @@ fn propagate_sourced_token( match net.get_idx(source) { Some(tree_sid) => { if 
net.trees.len() > tree_sid.index() { - send_sourced_token_to_net_childs( + send_sourced_token_to_net_clildren( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, tree_sid.index() as NodeId, @@ -186,6 +202,7 @@ fn register_router_token( face: &mut Arc, res: &mut Arc, router: ZenohIdProto, + send_declare: &mut SendDeclare, ) { if !res_hat!(res).router_tokens.contains(&router) { // Register router liveliness @@ -203,7 +220,7 @@ fn register_router_token( } // Propagate liveliness to clients - propagate_simple_token(tables, res, face); + propagate_simple_token(tables, res, face, send_declare); } fn declare_router_token( @@ -211,8 +228,9 @@ fn declare_router_token( face: &mut Arc, res: &mut Arc, router: ZenohIdProto, + send_declare: &mut SendDeclare, ) { - register_router_token(tables, face, res, router); + register_router_token(tables, face, res, router, send_declare); } fn register_peer_token( @@ -238,10 +256,11 @@ fn declare_peer_token( face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, + send_declare: &mut SendDeclare, ) { register_peer_token(tables, face, res, peer); let zid = tables.zid; - register_router_token(tables, face, res, zid); + register_router_token(tables, face, res, zid, send_declare); } fn register_client_token( @@ -276,10 +295,11 @@ fn declare_client_token( face: &mut Arc, id: TokenId, res: &mut Arc, + send_declare: &mut SendDeclare, ) { register_client_token(tables, face, id, res); let zid = tables.zid; - register_router_token(tables, face, res, zid); + register_router_token(tables, face, res, zid, send_declare); } #[inline] @@ -322,15 +342,15 @@ fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { } #[inline] -fn send_forget_sourced_token_to_net_childs( +fn send_forget_sourced_token_to_net_clildren( tables: &Tables, net: &Network, - childs: &[NodeIndex], + clildren: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, routing_context: Option, ) { - for child in childs { + for child in clildren 
{ if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { @@ -363,42 +383,52 @@ fn send_forget_sourced_token_to_net_childs( } } -fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { for mut face in tables.faces.values().cloned() { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } else if face_hat!(face).remote_interests.values().any(|(r, o)| { o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() }) { // Token has never been declared on this face. // Send an Undeclare with a one shot generated id and a WireExpr ext. 
- face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), - ext_wire_expr: WireExprType { - wire_expr: Resource::get_best_key(res, "", face.id), - }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); } for res in face_hat!(&mut face) .local_tokens @@ -415,19 +445,22 @@ fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } else if face_hat!(face).remote_interests.values().any(|(r, o)| { o.tokens() && r.as_ref().map(|r| r.matches(&res)).unwrap_or(true) @@ -435,28 +468,35 @@ fn propagate_forget_simple_token(tables: &mut Tables, res: &Arc) { }) { // Token has never been declared on this face. 
// Send an Undeclare with a one shot generated id and a WireExpr ext. - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), - ext_wire_expr: WireExprType { - wire_expr: Resource::get_best_key(&res, "", face.id), - }, - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(&res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); } } } } } -fn propagate_forget_simple_token_to_peers(tables: &mut Tables, res: &Arc) { +fn propagate_forget_simple_token_to_peers( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { if !hat!(tables).full_net(WhatAmI::Peer) && res_hat!(res).router_tokens.len() == 1 && res_hat!(res).router_tokens.contains(&tables.zid) @@ -478,19 +518,22 @@ fn propagate_forget_simple_token_to_peers(tables: &mut Tables, res: &Arc { if net.trees.len() > tree_sid.index() { - send_forget_sourced_token_to_net_childs( + send_forget_sourced_token_to_net_clildren( tables, net, - &net.trees[tree_sid.index()].childs, + &net.trees[tree_sid.index()].children, res, src_face, Some(tree_sid.index() as NodeId), @@ -533,7 +576,12 @@ fn propagate_forget_sourced_token( } } -fn unregister_router_token(tables: &mut Tables, res: &mut Arc, router: &ZenohIdProto) { +fn unregister_router_token( + tables: &mut Tables, + res: &mut Arc, + router: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { res_hat_mut!(res) .router_tokens .retain(|token| token != router); @@ -546,10 
+594,10 @@ fn unregister_router_token(tables: &mut Tables, res: &mut Arc, router: if hat_mut!(tables).full_net(WhatAmI::Peer) { undeclare_peer_token(tables, None, res, &tables.zid.clone()); } - propagate_forget_simple_token(tables, res); + propagate_forget_simple_token(tables, res, send_declare); } - propagate_forget_simple_token_to_peers(tables, res); + propagate_forget_simple_token_to_peers(tables, res, send_declare); } fn undeclare_router_token( @@ -557,9 +605,10 @@ fn undeclare_router_token( face: Option<&Arc>, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { if res_hat!(res).router_tokens.contains(router) { - unregister_router_token(tables, res, router); + unregister_router_token(tables, res, router, send_declare); propagate_forget_sourced_token(tables, res, face, router, WhatAmI::Router); } } @@ -569,8 +618,9 @@ fn forget_router_token( face: &mut Arc, res: &mut Arc, router: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { - undeclare_router_token(tables, Some(face), res, router); + undeclare_router_token(tables, Some(face), res, router, send_declare); } fn unregister_peer_token(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { @@ -600,13 +650,14 @@ fn forget_peer_token( face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, + send_declare: &mut SendDeclare, ) { undeclare_peer_token(tables, Some(face), res, peer); let client_tokens = res.session_ctxs.values().any(|ctx| ctx.token); let peer_tokens = remote_peer_tokens(tables, res); let zid = tables.zid; if !client_tokens && !peer_tokens { - undeclare_router_token(tables, None, res, &zid); + undeclare_router_token(tables, None, res, &zid, send_declare); } } @@ -614,6 +665,7 @@ pub(super) fn undeclare_client_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, + send_declare: &mut SendDeclare, ) { if !face_hat_mut!(face) .remote_tokens @@ -628,28 +680,31 @@ pub(super) fn undeclare_client_token( let router_tokens = remote_router_tokens(tables, res); let peer_tokens = 
remote_peer_tokens(tables, res); if client_tokens.is_empty() && !peer_tokens { - undeclare_router_token(tables, None, res, &tables.zid.clone()); + undeclare_router_token(tables, None, res, &tables.zid.clone(), send_declare); } else { - propagate_forget_simple_token_to_peers(tables, res); + propagate_forget_simple_token_to_peers(tables, res, send_declare); } if client_tokens.len() == 1 && !router_tokens && !peer_tokens { let mut face = &mut client_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); } for res in face_hat!(face) .local_tokens @@ -666,19 +721,22 @@ pub(super) fn undeclare_client_token( }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, 
+ res.expr(), + ), + ); } } } @@ -691,16 +749,22 @@ fn forget_client_token( tables: &mut Tables, face: &mut Arc, id: TokenId, + send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res); + undeclare_client_token(tables, face, &mut res, send_declare); Some(res) } else { None } } -pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_type: WhatAmI) { +pub(super) fn token_remove_node( + tables: &mut Tables, + node: &ZenohIdProto, + net_type: WhatAmI, + send_declare: &mut SendDeclare, +) { match net_type { WhatAmI::Router => { for mut res in hat!(tables) @@ -710,7 +774,7 @@ pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_ty .cloned() .collect::>>() { - unregister_router_token(tables, &mut res, node); + unregister_router_token(tables, &mut res, node, send_declare); Resource::clean(&mut res) } } @@ -726,7 +790,13 @@ pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_ty let client_tokens = res.session_ctxs.values().any(|ctx| ctx.token); let peer_tokens = remote_peer_tokens(tables, &res); if !client_tokens && !peer_tokens { - undeclare_router_token(tables, None, &mut res, &tables.zid.clone()); + undeclare_router_token( + tables, + None, + &mut res, + &tables.zid.clone(), + send_declare, + ); } Resource::clean(&mut res) } @@ -737,7 +807,7 @@ pub(super) fn token_remove_node(tables: &mut Tables, node: &ZenohIdProto, net_ty pub(super) fn token_tree_change( tables: &mut Tables, - new_childs: &[Vec], + new_clildren: &[Vec], net_type: WhatAmI, ) { let net = match hat!(tables).get_net(net_type) { @@ -747,9 +817,9 @@ pub(super) fn token_tree_change( return; } }; - // propagate tokens to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { + // propagate tokens to new clildren + for (tree_sid, tree_clildren) in new_clildren.iter().enumerate() 
{ + if !tree_clildren.is_empty() { let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; @@ -766,10 +836,10 @@ pub(super) fn token_tree_change( }; for token in tokens { if *token == tree_id { - send_sourced_token_to_net_childs( + send_sourced_token_to_net_clildren( tables, net, - tree_childs, + tree_clildren, res, None, tree_sid as NodeId, @@ -786,6 +856,7 @@ pub(super) fn token_linkstate_change( tables: &mut Tables, zid: &ZenohIdProto, links: &[ZenohIdProto], + send_declare: &mut SendDeclare, ) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { @@ -819,40 +890,46 @@ pub(super) fn token_linkstate_change( }) }; if forget { - dst_face.primitives.send_declare(RoutingContext::with_expr( + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + + face_hat_mut!(dst_face).local_tokens.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( Declare { interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { + body: DeclareBody::DeclareToken(DeclareToken { id, - ext_wire_expr: WireExprType::null(), + wire_expr: key_expr, }), }, res.expr(), - )); - - face_hat_mut!(dst_face).local_tokens.remove(res); - } - } else if 
HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: key_expr, - }), - }, - res.expr(), - )); + ), + ); } } } @@ -869,6 +946,7 @@ pub(crate) fn declare_token_interest( res: Option<&mut Arc>, mode: InterestMode, aggregate: bool, + send_declare: &mut SendDeclare, ) { if mode.current() && face.whatami == WhatAmI::Client { let interest_id = (!mode.future()).then_some(id); @@ -889,16 +967,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(res, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - res.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); } } else { for token in &hat!(tables).router_tokens { @@ -927,16 +1008,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + 
&face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); } } } @@ -963,16 +1047,19 @@ pub(crate) fn declare_token_interest( 0 }; let wire_expr = Resource::decl_key(token, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), - }, - token.expr(), - )); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); } } } @@ -988,23 +1075,24 @@ impl HatTokenTrait for HatCode { res: &mut Arc, node_id: NodeId, _interest_id: Option, + send_declare: &mut SendDeclare, ) { match face.whatami { WhatAmI::Router => { if let Some(router) = get_router(tables, face, node_id) { - declare_router_token(tables, face, res, router) + declare_router_token(tables, face, res, router, send_declare) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_token(tables, face, res, peer) + declare_peer_token(tables, face, res, peer, send_declare) } } else { - declare_client_token(tables, face, id, res) + declare_client_token(tables, face, id, res, send_declare) } } - _ => declare_client_token(tables, face, id, res), + _ => declare_client_token(tables, face, id, res, send_declare), } } @@ -1015,12 +1103,13 @@ impl HatTokenTrait for HatCode { id: TokenId, res: Option>, node_id: NodeId, + send_declare: &mut SendDeclare, ) -> Option> { match face.whatami { WhatAmI::Router => { if let Some(mut res) = res 
{ if let Some(router) = get_router(tables, face, node_id) { - forget_router_token(tables, face, &mut res, &router); + forget_router_token(tables, face, &mut res, &router, send_declare); Some(res) } else { None @@ -1033,7 +1122,7 @@ impl HatTokenTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_token(tables, face, &mut res, &peer); + forget_peer_token(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -1042,10 +1131,10 @@ impl HatTokenTrait for HatCode { None } } else { - forget_client_token(tables, face, id) + forget_client_token(tables, face, id, send_declare) } } - _ => forget_client_token(tables, face, id), + _ => forget_client_token(tables, face, id, send_declare), } } } diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index 06e86ec3ce..1b31040d3c 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -153,11 +153,11 @@ impl InterceptorTrait for DownsamplingInterceptor { return None; } } else { - tracing::debug!("unxpected cache ID {}", id); + tracing::debug!("unexpected cache ID {}", id); } } } else { - tracing::debug!("unxpected cache type {:?}", ctx.full_expr()); + tracing::debug!("unexpected cache type {:?}", ctx.full_expr()); } } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index f13d1a7b95..cd525189d3 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -100,11 +100,17 @@ impl Router { tables: self.tables.clone(), state: newface, }; + let mut declares = vec![]; ctrl_lock - .new_local_face(&mut tables, &self.tables, &mut face) + .new_local_face(&mut tables, &self.tables, &mut face, &mut |p, m| { + declares.push((p.clone(), m)) + }) .unwrap(); drop(tables); drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } 
Arc::new(face) } @@ -156,7 +162,19 @@ impl Router { let _ = mux.face.set(Face::downgrade(&face)); - ctrl_lock.new_transport_unicast_face(&mut tables, &self.tables, &mut face, &transport)?; + let mut declares = vec![]; + ctrl_lock.new_transport_unicast_face( + &mut tables, + &self.tables, + &mut face, + &transport, + &mut |p, m| declares.push((p.clone(), m)), + )?; + drop(tables); + drop(ctrl_lock); + for (p, m) in declares { + p.send_declare(m); + } Ok(Arc::new(DeMux::new(face, Some(transport), ingress))) } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 342423c44d..1e5c7e499e 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -647,7 +647,7 @@ fn metrics(context: &AdminContext, query: Query) { .unwrap(); #[allow(unused_mut)] let mut metrics = format!( - r#"# HELP zenoh_build Informations about zenoh. + r#"# HELP zenoh_build Information about zenoh. # TYPE zenoh_build gauge zenoh_build{{version="{}"}} 1 "#, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index a89ec94d61..4f3c6974f7 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -413,7 +413,7 @@ impl TransportEventHandler for RuntimeTransportEventHandler { .state .router .new_transport_multicast(transport.clone())?; - Ok(Arc::new(RuntimeMuticastGroup { + Ok(Arc::new(RuntimeMulticastGroup { runtime: runtime.clone(), transport, slave_handlers, @@ -470,20 +470,20 @@ impl TransportPeerEventHandler for RuntimeSession { } } -pub(super) struct RuntimeMuticastGroup { +pub(super) struct RuntimeMulticastGroup { pub(super) runtime: Runtime, pub(super) transport: TransportMulticast, pub(super) slave_handlers: Vec>, } -impl TransportMulticastEventHandler for RuntimeMuticastGroup { +impl TransportMulticastEventHandler for RuntimeMulticastGroup { fn new_peer(&self, peer: TransportPeer) -> ZResult> { let slave_handlers: Vec> = self .slave_handlers .iter() .filter_map(|handler| 
handler.new_peer(peer.clone()).ok()) .collect(); - Ok(Arc::new(RuntimeMuticastSession { + Ok(Arc::new(RuntimeMulticastSession { main_handler: self .runtime .state @@ -510,12 +510,12 @@ impl TransportMulticastEventHandler for RuntimeMuticastGroup { } } -pub(super) struct RuntimeMuticastSession { +pub(super) struct RuntimeMulticastSession { pub(super) main_handler: Arc, pub(super) slave_handlers: Vec>, } -impl TransportPeerEventHandler for RuntimeMuticastSession { +impl TransportPeerEventHandler for RuntimeMulticastSession { fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { self.main_handler.handle_message(msg) } diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 73a0355199..da7739e3be 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -158,7 +158,7 @@ impl Runtime { } } } else { - bail!("No peer specified and multicast scouting desactivated!") + bail!("No peer specified and multicast scouting deactivated!") } } _ => self.connect_peers(&peers, true).await, @@ -423,10 +423,10 @@ impl Runtime { pub(crate) async fn update_peers(&self) -> ZResult<()> { let peers = { self.state.config.lock().connect().endpoints().clone() }; - let tranports = self.manager().get_transports_unicast().await; + let transports = self.manager().get_transports_unicast().await; if self.state.whatami == WhatAmI::Client { - for transport in tranports { + for transport in transports { let should_close = if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() @@ -449,7 +449,7 @@ impl Runtime { } } else { for peer in peers { - if !tranports.iter().any(|transport| { + if !transports.iter().any(|transport| { if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index de0b691b04..8ef9294edc 100644 --- 
a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -79,6 +79,7 @@ fn base_test() { &WireExpr::from(1).with_suffix("four/five"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); Tables::print(&zread!(tables.tables)); @@ -204,6 +205,7 @@ fn multisub_test() { &"sub".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") .map(|res| Arc::downgrade(&res)); @@ -219,6 +221,7 @@ fn multisub_test() { &"sub".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res.upgrade().is_some()); @@ -229,6 +232,7 @@ fn multisub_test() { 0, &WireExpr::empty(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res.upgrade().is_some()); @@ -239,6 +243,7 @@ fn multisub_test() { 1, &WireExpr::empty(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res.upgrade().is_none()); @@ -323,6 +328,7 @@ async fn clean_test() { &"todrop1/todrop11".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop11") .map(|res| Arc::downgrade(&res)); @@ -338,6 +344,7 @@ async fn clean_test() { &WireExpr::from(1).with_suffix("/todrop12"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop12") .map(|res| Arc::downgrade(&res)); @@ -353,6 +360,7 @@ async fn clean_test() { 1, &WireExpr::empty(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); println!("COUNT2: {}", res3.strong_count()); @@ -368,6 +376,7 @@ async fn clean_test() { 0, &WireExpr::empty(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res1.upgrade().is_some()); assert!(res2.upgrade().is_none()); @@ -388,6 +397,7 @@ async fn clean_test() { &"todrop3".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres1 = 
Resource::get_resource(zread!(tables.tables)._get_root(), "todrop3") .map(|res| Arc::downgrade(&res)); @@ -402,6 +412,7 @@ async fn clean_test() { 2, &WireExpr::empty(), NodeId::default(), + &mut |p, m| p.send_declare(m), ); assert!(res1.upgrade().is_some()); @@ -419,6 +430,7 @@ async fn clean_test() { &"todrop5".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); declare_subscription( zlock!(tables.ctrl_lock).as_ref(), @@ -428,6 +440,7 @@ async fn clean_test() { &"todrop6".into(), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop4") @@ -610,6 +623,7 @@ fn client_test() { &WireExpr::from(11).with_suffix("/**"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); register_expr( &tables, @@ -660,6 +674,7 @@ fn client_test() { &WireExpr::from(21).with_suffix("/**"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); register_expr( &tables, @@ -710,6 +725,7 @@ fn client_test() { &WireExpr::from(31).with_suffix("/**"), &sub_info, NodeId::default(), + &mut |p, m| p.send_declare(m), ); primitives0.clear_data(); @@ -735,13 +751,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr1"); // mapping strategy check @@ -769,13 +785,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr2"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), 
KeyExpr::IdWithSuffix(21, "/z1_wr2".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr2"); // mapping strategy check @@ -803,13 +819,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives0.get_last_name().is_some()); assert_eq!(primitives0.get_last_name().unwrap(), "test/client/**"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/**".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/**"); // mapping strategy check @@ -837,13 +853,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives1.get_last_name().is_some()); assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_pub1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_pub1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_pub1"); // mapping strategy check @@ -871,13 +887,13 @@ fn client_test() { 0, ); - // functionnal check + // functional check assert!(primitives0.get_last_name().is_some()); assert_eq!(primitives0.get_last_name().unwrap(), "test/client/z2_pub1"); // mapping strategy check // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/z2_pub1".to_string())); - // functionnal check + // functional check assert!(primitives2.get_last_name().is_some()); assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z2_pub1"); // mapping strategy check diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index b632434c08..7f61f459d6 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -322,7 
+322,7 @@ impl Recipe { // node_task_tracker.close(); // node_task_tracker.wait().await; - // Close the session once all the task assoicated with the node are done. + // Close the session once all the task associated with the node are done. ztimeout!(Arc::try_unwrap(session).unwrap().close())?; println!("Node: {} is closed.", &node.name); diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 123f6fc656..bf7f4841a1 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -129,7 +129,7 @@ fn config_from_args(args: &Args) -> Config { if let Some(id) = &args.id { config.set_id(id.parse().unwrap()).unwrap(); } - // apply '--rest-http-port' to config only if explicitly set (overwritting config), + // apply '--rest-http-port' to config only if explicitly set (overwriting config), // or if no config file is set (to apply its default value) if args.rest_http_port.is_some() || args.config.is_none() { let value = args.rest_http_port.as_deref().unwrap_or("8000"); From 5bce0ea5a4e99a0f07fe198e755322dd26393cca Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 19 Jun 2024 11:44:51 +0200 Subject: [PATCH 484/598] Disable downsampling_by_keyexpr on windows (#1170) --- zenoh/tests/interceptors.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 3ee2c51828..48a3f1b3e8 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -180,6 +180,7 @@ fn downsampling_by_keyexpr_impl(flow: InterceptorFlow) { downsampling_test(pub_config, sub_config, ke_prefix, ke_of_rates, rate_check); } +#[cfg(unix)] #[test] fn downsampling_by_keyexpr() { zenoh::try_init_log_from_env(); From 39ea5e50eecede3c61da0afdcdc1d489e5383efd Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 19 Jun 2024 14:40:37 +0200 Subject: [PATCH 485/598] Fix lints check --- zenoh/tests/interceptors.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs 
index 48a3f1b3e8..7b82b23814 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -11,6 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // +#![cfg(unix)] + use std::{ collections::HashMap, sync::{ @@ -180,7 +182,6 @@ fn downsampling_by_keyexpr_impl(flow: InterceptorFlow) { downsampling_test(pub_config, sub_config, ke_prefix, ke_of_rates, rate_check); } -#[cfg(unix)] #[test] fn downsampling_by_keyexpr() { zenoh::try_init_log_from_env(); From 5a893440b139b1413037b02d045b33a72211ba19 Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Thu, 20 Jun 2024 00:04:08 +0200 Subject: [PATCH 486/598] Add new_timestamp --- zenoh/src/api/session.rs | 28 +++++++++++++++++++++++++--- zenoh/src/api/time.rs | 1 + 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index e874cd2393..81444ccbaa 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -21,11 +21,11 @@ use std::{ atomic::{AtomicU16, Ordering}, Arc, RwLock, }, - time::Duration, + time::{Duration, SystemTime, UNIX_EPOCH}, }; use tracing::{error, trace, warn}; -use uhlc::HLC; +use uhlc::{Timestamp, HLC}; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::{unwrap_or_default, wrappers::ZenohId, Config, Notifier}; @@ -630,7 +630,7 @@ impl Session { /// The returned configuration [`Notifier`](Notifier) can be used to read the current /// zenoh configuration through the `get` function or /// modify the zenoh configuration through the `insert`, - /// or `insert_json5` funtion. + /// or `insert_json5` function. /// /// # Examples /// ### Read current zenoh configuration @@ -657,6 +657,28 @@ impl Session { pub fn config(&self) -> &Notifier { self.runtime.config() } + + /// Get a new Timestamp from a Zenoh session [`Session`](Session). 
+ /// + /// The returned timestamp has the current time, with the Session's runtime ZenohID + /// + /// # Examples + /// ### Read current zenoh configuration + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::*; + /// + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let timestamp = session.new_timestamp(); + /// # } + /// ``` + pub fn new_timestamp(&self) -> Timestamp { + let id = self.runtime.zid(); + // TODO: Should we make this an Result return type ? + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); + Timestamp::new(now, id.into()) + } } impl<'a> SessionDeclarations<'a, 'a> for Session { diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index a617c2004c..1879143389 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -15,6 +15,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh_protocol::core::{Timestamp, TimestampId}; +// TODO: Shall we remove this new_timestamp in favoir of the src/api/session::Session::new_timestamp(); /// Generates a [`Timestamp`] with [`TimestampId`] and current system time /// The [`TimestampId`] can be taken from session id returned by [`SessionInfo::zid()`](crate::api::info::SessionInfo::zid). pub fn new_timestamp>(id: T) -> Timestamp { From e478df66f08b2ac09db13666231f66a6521d53e3 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 20 Jun 2024 09:50:58 +0200 Subject: [PATCH 487/598] Clarify queue size config (#1171) * Clarigy queue size config * Clarigy queue size config * Fix typo --- DEFAULT_CONFIG.json5 | 1 + 1 file changed, 1 insertion(+) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 7a14eba2bd..0e180a0e07 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -287,6 +287,7 @@ /// Each zenoh link has a transmission queue that can be configured queue: { /// The size of each priority queue indicates the number of batches a given queue can contain. 
+ /// NOTE: the number of batches in each priority must be included between 1 and 16. Different values will result in an error. /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. From 6ce0aadfa34baf6a7a1b75c23044ae8e5cc17d5b Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 20 Jun 2024 15:11:55 +0200 Subject: [PATCH 488/598] Remove `zenoh-shm` dev dependency on `zenoh-shm test` (#1177) * Remove `zenoh-shm` dev dependency on `zenoh-shm test` * Enable `test` feature in CI --- .github/workflows/ci.yml | 12 ++++++------ Cargo.lock | 1 - commons/zenoh-shm/Cargo.toml | 3 +-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35250bdec5..a62257446e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,10 +67,10 @@ jobs: run: cargo +stable clippy -p zenoh --all-targets --features unstable,shared-memory -- --deny warnings - name: Clippy workspace - run: cargo +stable clippy --all-targets -- --deny warnings + run: cargo +stable clippy --all-targets --features test -- --deny warnings - name: Clippy workspace unstable - run: cargo +stable clippy --all-targets --features unstable -- --deny warnings + run: cargo +stable clippy --all-targets --features unstable,test -- --deny warnings - name: Clippy all features if: ${{ matrix.os == 'ubuntu-latest' || matrix.os == 'macOS-latest' }} @@ -118,15 +118,15 @@ jobs: uses: taiki-e/install-action@nextest - name: Run tests - run: cargo nextest run --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + run: cargo nextest run -F test --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Run tests with SHM if: ${{ matrix.os == 'macOS-latest' || matrix.os == 'windows-latest' }} - run: cargo nextest run -F 
shared-memory -F unstable -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + run: cargo nextest run -F test -F shared-memory -F unstable -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Run tests with SHM + unixpipe if: ${{ matrix.os == 'ubuntu-latest' }} - run: cargo nextest run -F shared-memory -F unstable -F transport_unixpipe -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + run: cargo nextest run -F test -F shared-memory -F unstable -F transport_unixpipe -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Check for feature leaks if: ${{ matrix.os == 'ubuntu-latest' }} @@ -156,7 +156,7 @@ jobs: name: Typos Check runs-on: ubuntu-latest steps: - - name: Clone this repository + - name: Clone this repository uses: actions/checkout@v4 - name: Check spelling diff --git a/Cargo.lock b/Cargo.lock index e794fd289c..b8a03280c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5950,7 +5950,6 @@ dependencies = [ "zenoh-core", "zenoh-macros", "zenoh-result", - "zenoh-shm", ] [[package]] diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index 60b9acde1d..a76cf896d3 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -35,7 +35,7 @@ test = ["num_cpus"] async-trait = { workspace = true } bincode = { workspace = true } crc = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde = { workspace = true, features = ["default"] } shared_memory = { workspace = true } tokio = { workspace = true } @@ -52,5 +52,4 @@ lockfree = { workspace = true } stabby = { workspace = true } [dev-dependencies] -zenoh-shm = { workspace = true, features = ["test"] } libc = { workspace = true } From 3799dc11d4798c8a746ddbeae046eff30fa77ed6 Mon Sep 17 00:00:00 2001 From: Charles 
Schleich Date: Thu, 20 Jun 2024 16:18:38 +0200 Subject: [PATCH 489/598] Comment on unwrap being permissable --- zenoh/src/api/session.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 81444ccbaa..ff6d2daef0 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -675,8 +675,7 @@ impl Session { /// ``` pub fn new_timestamp(&self) -> Timestamp { let id = self.runtime.zid(); - // TODO: Should we make this an Result return type ? - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here Timestamp::new(now, id.into()) } } From db6e0bfc8ffb368585fc7038ee4c3611a691adb5 Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Thu, 20 Jun 2024 16:36:49 +0200 Subject: [PATCH 490/598] Value fields public --- zenoh/src/api/value.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 006767e427..88470b3360 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -19,8 +19,8 @@ use super::{bytes::ZBytes, encoding::Encoding}; #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - pub(crate) payload: ZBytes, - pub(crate) encoding: Encoding, + pub payload: ZBytes, + pub encoding: Encoding, } impl Value { From 755cdc21830742c5f5280197ef272b72a4ee5280 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 25 Jun 2024 09:58:47 +0200 Subject: [PATCH 491/598] Improve bytes (#1174) * Use generic associated types for ZBytes * Return proper error for serialize/deserialize * Relax static lifetime requirements * Improve docs * Refine trait bounds * ZSerde support for HashMap * Leverage writer for from_iter * ZSerde support for (A, B, C) * ZSerde support for u128 and i128 * ZSerde support for (A, B, C, D) * Rework tests * Improve 
Encoding doc * Improve docs * Improve docs * Rename ZReadOrDeserializeError * Add ZBytes char support * Add ZBytes char support * Improve docs * Cargo fmt * Add ZENOH_INT/ZENOU_UINT{8,16,32,64,128} Encoding * Use EncodingId instead of u16 --- zenoh/src/api/bytes.rs | 1301 ++++++++++++++++++++++++++++--------- zenoh/src/api/encoding.rs | 519 +++++++++------ 2 files changed, 1325 insertions(+), 495 deletions(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index d8a9ec7feb..60254ff321 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -14,15 +14,15 @@ //! ZBytes primitives. use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, str::Utf8Error, - string::FromUtf8Error, sync::Arc, + borrow::Cow, collections::HashMap, convert::Infallible, fmt::Debug, marker::PhantomData, + str::Utf8Error, string::FromUtf8Error, sync::Arc, }; use uhlc::Timestamp; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, - reader::{HasReader, Reader}, + reader::{DidntRead, HasReader, Reader}, writer::HasWriter, ZBuf, ZBufReader, ZBufWriter, ZSlice, }; @@ -31,7 +31,6 @@ use zenoh_protocol::{ core::{Encoding as EncodingProto, Parameters}, zenoh::ext::AttachmentType, }; -use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::{ api::buffer::{ @@ -43,6 +42,50 @@ use zenoh_shm::{ use super::{encoding::Encoding, value::Value}; +/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. 
+#[repr(transparent)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct OptionZBytes(Option); + +impl From for OptionZBytes +where + T: Into, +{ + fn from(value: T) -> Self { + Self(Some(value.into())) + } +} + +impl From> for OptionZBytes +where + T: Into, +{ + fn from(mut value: Option) -> Self { + match value.take() { + Some(v) => Self(Some(v.into())), + None => Self(None), + } + } +} + +impl From<&Option> for OptionZBytes +where + for<'a> &'a T: Into, +{ + fn from(value: &Option) -> Self { + match value.as_ref() { + Some(v) => Self(Some(v.into())), + None => Self(None), + } + } +} + +impl From for Option { + fn from(value: OptionZBytes) -> Self { + value.0 + } +} + /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { type Output; @@ -51,15 +94,97 @@ pub trait Serialize { fn serialize(self, t: T) -> Self::Output; } -pub trait Deserialize<'a, T> { - type Input: 'a; +pub trait Deserialize { + type Input<'a>; type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: Self::Input) -> Result; + fn deserialize(self, t: Self::Input<'_>) -> Result; } /// ZBytes contains the serialized bytes of user data. +/// +/// `ZBytes` provides convenient methods to the user for serialization/deserialization based on the default Zenoh serializer [`ZSerde`]. +/// +/// **NOTE:** Zenoh semantic and protocol take care of sending and receiving bytes without restricting the actual data types. +/// [`ZSerde`] is the default serializer/deserializer provided for convenience to the users to deal with primitives data types via +/// a simple out-of-the-box encoding. [`ZSerde`] is **NOT** by any means the only serializer/deserializer users can use nor a limitation +/// to the types supported by Zenoh. Users are free and encouraged to use any serializer/deserializer of their choice like *serde*, +/// *protobuf*, *bincode*, *flatbuffers*, etc. 
+/// +/// `ZBytes` can be used to serialize a single type: +/// ```rust +/// use zenoh::bytes::ZBytes; +/// +/// let start = String::from("abc"); +/// let bytes = ZBytes::serialize(start.clone()); +/// let end: String = bytes.deserialize().unwrap(); +/// assert_eq!(start, end); +/// ``` +/// +/// A tuple of serializable types: +/// ```rust +/// use zenoh::bytes::ZBytes; +/// +/// let start = (String::from("abc"), String::from("def")); +/// let bytes = ZBytes::serialize(start.clone()); +/// let end: (String, String) = bytes.deserialize().unwrap(); +/// assert_eq!(start, end); +/// +/// let start = (1_u8, 3.14_f32, String::from("abc")); +/// let bytes = ZBytes::serialize(start.clone()); +/// let end: (u8, f32, String) = bytes.deserialize().unwrap(); +/// assert_eq!(start, end); +/// `````` +/// +/// An iterator of serializable types: +/// ```rust +/// use zenoh::bytes::ZBytes; +/// +/// let start = vec![String::from("abc"), String::from("def")]; +/// let bytes = ZBytes::from_iter(start.iter()); +/// +/// let mut i = 0; +/// let mut iter = bytes.iter::(); +/// while let Some(Ok(t)) = iter.next() { +/// assert_eq!(start[i], t); +/// i += 1; +/// } +/// ``` +/// +/// A writer and a reader of serializable types: +/// ```rust +/// use zenoh::bytes::ZBytes; +/// +/// #[derive(Debug, PartialEq)] +/// struct Foo { +/// one: usize, +/// two: String, +/// three: Vec, +/// } +/// +/// let start = Foo { +/// one: 42, +/// two: String::from("Forty-Two"), +/// three: vec![42u8; 42], +/// }; +/// +/// let mut bytes = ZBytes::empty(); +/// let mut writer = bytes.writer(); +/// +/// writer.serialize(&start.one); +/// writer.serialize(&start.two); +/// writer.serialize(&start.three); +/// +/// let mut reader = bytes.reader(); +/// let end = Foo { +/// one: reader.deserialize().unwrap(), +/// two: reader.deserialize().unwrap(), +/// three: reader.deserialize().unwrap(), +/// }; +/// assert_eq!(start, end); +/// ``` +/// #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, 
Eq)] pub struct ZBytes(ZBuf); @@ -111,9 +236,8 @@ impl ZBytes { /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. pub fn iter(&self) -> ZBytesIterator<'_, T> where - T: for<'b> TryFrom<&'b ZBytes>, - for<'b> ZSerde: Deserialize<'b, T>, - for<'b> >::Error: Debug, + for<'b> ZSerde: Deserialize = &'b ZBytes>, + for<'b> >::Error: Debug, { ZBytesIterator { reader: self.0.reader(), @@ -121,7 +245,7 @@ impl ZBytes { } } - /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. + /// Serialize an object of type `T` as a [`ZBytes`] using the [`ZSerde`]. /// /// ```rust /// use zenoh::bytes::ZBytes; @@ -133,38 +257,65 @@ impl ZBytes { /// ``` pub fn serialize(t: T) -> Self where - ZSerde: Serialize, + ZSerde: Serialize, + { + ZSerde.serialize(t) + } + + /// Try serializing an object of type `T` as a [`ZBytes`] using the [`ZSerde`]. + /// + /// ```rust + /// use serde_json::Value; + /// use zenoh::bytes::ZBytes; + /// + /// // Some JSON input data as a &str. Maybe this comes from the user. + /// let data = r#" + /// { + /// "name": "John Doe", + /// "age": 43, + /// "phones": [ + /// "+44 1234567", + /// "+44 2345678" + /// ] + /// }"#; + /// + /// // Parse the string of data into serde_json::Value. + /// let start: Value = serde_json::from_str(data).unwrap(); + /// // The serialization of a serde_json::Value is faillable (see `serde_json::to_string()`). + /// let bytes = ZBytes::try_serialize(start.clone()).unwrap(); + /// let end: Value = bytes.deserialize().unwrap(); + /// assert_eq!(start, end); + /// ``` + pub fn try_serialize(t: T) -> Result + where + ZSerde: Serialize>, { ZSerde.serialize(t) } /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
- pub fn deserialize<'a, T>(&'a self) -> ZResult + pub fn deserialize<'a, T>(&'a self) -> Result>::Error> where - ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, - >::Error: Debug, + ZSerde: Deserialize = &'a ZBytes>, + >::Error: Debug, { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) + ZSerde.deserialize(self) } /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult + pub fn deserialize_mut<'a, T>(&'a mut self) -> Result>::Error> where - ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, - >::Error: Debug, + ZSerde: Deserialize = &'a mut ZBytes>, + >::Error: Debug, { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) + ZSerde.deserialize(self) } /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn into<'a, T>(&'a self) -> T where - ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, - >::Error: Debug, + ZSerde: Deserialize = &'a ZBytes, Error = Infallible>, + >::Error: Debug, { ZSerde.deserialize(self).unwrap_infallible() } @@ -172,8 +323,8 @@ impl ZBytes { /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn into_mut<'a, T>(&'a mut self) -> T where - ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, - >::Error: Debug, + ZSerde: Deserialize = &'a mut ZBytes, Error = Infallible>, + >::Error: Debug, { ZSerde.deserialize(self).unwrap_infallible() } @@ -184,6 +335,16 @@ impl ZBytes { #[derive(Debug)] pub struct ZBytesReader<'a>(ZBufReader<'a>); +#[derive(Debug)] +pub enum ZReadOrDeserializeError +where + T: TryFrom, + >::Error: Debug, +{ + Read(DidntRead), + Deserialize(>::Error), +} + impl ZBytesReader<'_> { /// Returns the number of bytes that can still be read pub fn remaining(&self) -> usize { @@ -196,16 +357,16 @@ impl ZBytesReader<'_> { } /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
- pub fn deserialize(&mut self) -> ZResult + pub fn deserialize(&mut self) -> Result>::Error> where - T: TryFrom, - >::Error: Debug, + for<'a> ZSerde: Deserialize = &'a ZBytes>, + >::Error: Debug, { let codec = Zenoh080::new(); - let abuf: ZBuf = codec.read(&mut self.0).map_err(|e| zerror!("{:?}", e))?; + let abuf: ZBuf = codec.read(&mut self.0).unwrap(); let apld = ZBytes::new(abuf); - let a = T::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let a = ZSerde.deserialize(&apld)?; Ok(a) } } @@ -228,17 +389,28 @@ impl std::io::Seek for ZBytesReader<'_> { pub struct ZBytesWriter<'a>(ZBufWriter<'a>); impl ZBytesWriter<'_> { - pub fn serialize(&mut self, t: T) -> ZResult<()> + fn write(&mut self, bytes: &ZBuf) { + let codec = Zenoh080::new(); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { codec.write(&mut self.0, bytes).unwrap_unchecked() }; + } + + pub fn serialize(&mut self, t: T) where - T: TryInto, - >::Error: Debug, + ZSerde: Serialize, { - let tpld: ZBytes = t.try_into().map_err(|e| zerror!("{:?}", e))?; - let codec = Zenoh080::new(); - codec - .write(&mut self.0, &tpld.0) - .map_err(|e| zerror!("{:?}", e))?; + let tpld = ZSerde.serialize(t); + self.write(&tpld.0); + } + pub fn try_serialize(&mut self, t: T) -> Result<(), E> + where + ZSerde: Serialize>, + { + let tpld = ZSerde.serialize(t)?; + self.write(&tpld.0); Ok(()) } } @@ -264,10 +436,10 @@ pub struct ZBytesIterator<'a, T> { impl Iterator for ZBytesIterator<'_, T> where - for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, - for<'a> >::Error: Debug, + for<'a> ZSerde: Deserialize = &'a ZBytes>, + >::Error: Debug, { - type Item = ZResult; + type Item = Result>::Error>; fn next(&mut self) -> Option { let codec = Zenoh080::new(); @@ -275,10 +447,7 @@ where let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; let kpld = 
ZBytes::new(kbuf); - let result = ZSerde - .deserialize(&kpld) - .map_err(|err| zerror!("{err:?}").into()); - Some(result) + Some(ZSerde.deserialize(&kpld)) } } @@ -287,69 +456,24 @@ where ZSerde: Serialize, { fn from_iter>(iter: T) -> Self { - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut writer = buffer.writer(); + let mut bytes = ZBytes::empty(); + let mut writer = bytes.writer(); for t in iter { - let tpld = ZSerde.serialize(t); - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &tpld.0).unwrap_unchecked(); - } - } - - ZBytes::new(buffer) - } -} - -/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. -#[repr(transparent)] -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct OptionZBytes(Option); - -impl From for OptionZBytes -where - T: Into, -{ - fn from(value: T) -> Self { - Self(Some(value.into())) - } -} - -impl From> for OptionZBytes -where - T: Into, -{ - fn from(mut value: Option) -> Self { - match value.take() { - Some(v) => Self(Some(v.into())), - None => Self(None), - } - } -} - -impl From<&Option> for OptionZBytes -where - for<'a> &'a T: Into, -{ - fn from(value: &Option) -> Self { - match value.as_ref() { - Some(v) => Self(Some(v.into())), - None => Self(None), + writer.serialize(t); } - } -} -impl From for Option { - fn from(value: OptionZBytes) -> Self { - value.0 + ZBytes::new(bytes) } } -/// The default serializer for ZBytes. It supports primitives types, such as: Vec, int, uint, float, string, bool. -/// It also supports common Rust serde values. +/// The default serializer for [`ZBytes`]. It supports primitives types, such as: `Vec`, `uX`, `iX`, `fX`, `String`, `bool`. +/// It also supports common Rust serde values like `serde_json::Value`. 
+/// +/// **NOTE:** Zenoh semantic and protocol take care of sending and receiving bytes without restricting the actual data types. +/// [`ZSerde`] is the default serializer/deserializer provided for convenience to the users to deal with primitives data types via +/// a simple out-of-the-box encoding. [`ZSerde`] is **NOT** by any means the only serializer/deserializer users can use nor a limitation +/// to the types supported by Zenoh. Users are free and encouraged to use any serializer/deserializer of their choice like *serde*, +/// *protobuf*, *bincode*, *flatbuffers*, etc. #[derive(Clone, Copy, Debug)] pub struct ZSerde; @@ -393,11 +517,11 @@ impl Serialize<&mut ZBytes> for ZSerde { } } -impl<'a> Deserialize<'a, ZBytes> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { Ok(v.clone()) } } @@ -445,11 +569,11 @@ impl From<&mut ZBuf> for ZBytes { } } -impl<'a> Deserialize<'a, ZBuf> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { Ok(v.0.clone()) } } @@ -515,11 +639,11 @@ impl From<&mut ZSlice> for ZBytes { } } -impl<'a> Deserialize<'a, ZSlice> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { Ok(v.0.to_zslice()) } } @@ -585,11 +709,11 @@ impl From<&mut [u8; N]> for ZBytes { } } -impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize<[u8; N]> for ZSerde { + type Input<'a> = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<[u8; N], 
Self::Error> { + fn deserialize(self, v: Self::Input<'_>) -> Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -669,11 +793,11 @@ impl From<&mut Vec> for ZBytes { } } -impl<'a> Deserialize<'a, Vec> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize> for ZSerde { + type Input<'a> = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input<'_>) -> Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } @@ -768,11 +892,11 @@ impl From<&mut Cow<'_, [u8]>> for ZBytes { } } -impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { - type Input = &'a ZBytes; +impl<'a> Deserialize> for ZSerde { + type Input<'b> = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input<'a>) -> Result, Self::Error> { Ok(v.0.contiguous()) } } @@ -841,11 +965,11 @@ impl From<&mut String> for ZBytes { } } -impl<'a> Deserialize<'a, String> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = FromUtf8Error; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } @@ -946,11 +1070,11 @@ impl From<&mut Cow<'_, str>> for ZBytes { } } -impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { - type Input = &'a ZBytes; +impl<'a> Deserialize> for ZSerde { + type Input<'b> = &'a ZBytes; type Error = Utf8Error; - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input<'a>) -> Result, Self::Error> { Cow::try_from(v) } } @@ -1041,11 +1165,11 @@ macro_rules! 
impl_int { } } - impl<'a> Deserialize<'a, $t> for ZSerde { - type Input = &'a ZBytes; + impl Deserialize<$t> for ZSerde { + type Input<'a> = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { + fn deserialize(self, v: Self::Input<'_>) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -1091,6 +1215,7 @@ impl_int!(u8); impl_int!(u16); impl_int!(u32); impl_int!(u64); +impl_int!(u128); impl_int!(usize); // Zenoh signed integers @@ -1098,6 +1223,7 @@ impl_int!(i8); impl_int!(i16); impl_int!(i32); impl_int!(i64); +impl_int!(i128); impl_int!(isize); // Zenoh floats @@ -1149,11 +1275,11 @@ impl From<&mut bool> for ZBytes { } } -impl<'a> Deserialize<'a, bool> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -1187,6 +1313,86 @@ impl TryFrom<&mut ZBytes> for bool { } } +// Zenoh char +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: char) -> Self::Output { + // We can convert char to u32 and encode it as such + // See https://doc.rust-lang.org/std/primitive.char.html#method.from_u32 + ZSerde.serialize(t as u32) + } +} + +impl From for ZBytes { + fn from(t: char) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&char> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &char) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&char> for ZBytes { + fn from(t: &char) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut char> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut char) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut char> for ZBytes { + fn from(t: &mut char) -> Self { + ZSerde.serialize(t) + } +} + +impl 
Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let c = v.deserialize::()?; + let c = char::try_from(c).map_err(|_| ZDeserializeError)?; + Ok(c) + } +} + +impl TryFrom for char { + type Error = ZDeserializeError; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for char { + type Error = ZDeserializeError; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for char { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // - Zenoh advanced types serializer/deserializer // Parameters impl Serialize> for ZSerde { @@ -1231,13 +1437,13 @@ impl<'s> From<&'s mut Parameters<'s>> for ZBytes { } } -impl<'s> Deserialize<'s, Parameters<'s>> for ZSerde { - type Input = &'s ZBytes; +impl<'a> Deserialize> for ZSerde { + type Input<'b> = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input<'a>) -> Result, Self::Error> { let s = v - .deserialize::>() + .deserialize::>() .map_err(|_| ZDeserializeError)?; Ok(Parameters::from(s)) } @@ -1320,11 +1526,11 @@ impl From<&mut Timestamp> for ZBytes { } } -impl<'a> Deserialize<'a, Timestamp> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = zenoh_buffers::reader::DidntRead; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { let codec = Zenoh080::new(); let mut reader = v.0.reader(); let e: Timestamp = codec.read(&mut reader)?; @@ -1409,11 +1615,11 @@ impl From<&mut Encoding> for ZBytes { } } -impl<'a> Deserialize<'a, Encoding> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = 
zenoh_buffers::reader::DidntRead; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { let codec = Zenoh080::new(); let mut reader = v.0.reader(); let e: EncodingProto = codec.read(&mut reader)?; @@ -1488,20 +1694,18 @@ impl From<&mut Value> for ZBytes { } } -impl<'a> Deserialize<'a, Value> for ZSerde { - type Input = &'a ZBytes; - type Error = ZError; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple2; - fn deserialize(self, v: Self::Input) -> Result { - let (payload, encoding) = v - .deserialize::<(ZBytes, Encoding)>() - .map_err(|e| zerror!("{:?}", e))?; + fn deserialize(self, v: Self::Input<'_>) -> Result { + let (payload, encoding) = v.deserialize::<(ZBytes, Encoding)>()?; Ok(Value::new(payload, encoding)) } } impl TryFrom for Value { - type Error = ZError; + type Error = ZReadOrDeserializeErrorTuple2; fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) @@ -1509,7 +1713,7 @@ impl TryFrom for Value { } impl TryFrom<&ZBytes> for Value { - type Error = ZError; + type Error = ZReadOrDeserializeErrorTuple2; fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) @@ -1517,7 +1721,7 @@ impl TryFrom<&ZBytes> for Value { } impl TryFrom<&mut ZBytes> for Value { - type Error = ZError; + type Error = ZReadOrDeserializeErrorTuple2; fn try_from(value: &mut ZBytes) -> Result { ZSerde.deserialize(&*value) @@ -1577,11 +1781,11 @@ impl TryFrom<&mut serde_json::Value> for ZBytes { } } -impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = serde_json::Error; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { serde_json::from_reader(v.reader()) } } @@ -1663,11 +1867,11 @@ impl TryFrom<&mut serde_yaml::Value> for ZBytes { } } -impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { - type 
Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = serde_yaml::Error; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { serde_yaml::from_reader(v.reader()) } } @@ -1747,11 +1951,11 @@ impl TryFrom<&mut serde_cbor::Value> for ZBytes { } } -impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = serde_cbor::Error; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { serde_cbor::from_reader(v.reader()) } } @@ -1835,11 +2039,11 @@ impl TryFrom<&mut serde_pickle::Value> for ZBytes { } } -impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { - type Input = &'a ZBytes; +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; type Error = serde_pickle::Error; - fn deserialize(self, v: Self::Input) -> Result { + fn deserialize(self, v: Self::Input<'_>) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } @@ -1905,11 +2109,11 @@ impl From for ZBytes { } #[cfg(feature = "shared-memory")] -impl<'a> Deserialize<'a, &'a zshm> for ZSerde { - type Input = &'a ZBytes; +impl<'a> Deserialize<&'a zshm> for ZSerde { + type Input<'b> = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a zshm, Self::Error> { + fn deserialize(self, v: Self::Input<'a>) -> Result<&'a zshm, Self::Error> { // A ZShm is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { @@ -1940,11 +2144,11 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { } #[cfg(feature = "shared-memory")] -impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { - type Input = &'a mut ZBytes; +impl<'a> Deserialize<&'a mut zshm> for ZSerde { + type Input<'b> = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) 
-> Result<&'a mut zshm, Self::Error> { + fn deserialize(self, v: Self::Input<'a>) -> Result<&'a mut zshm, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1957,11 +2161,11 @@ impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { } #[cfg(feature = "shared-memory")] -impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { - type Input = &'a mut ZBytes; +impl<'a> Deserialize<&'a mut zshmmut> for ZSerde { + type Input<'b> = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zshmmut, Self::Error> { + fn deserialize(self, v: Self::Input<'a>) -> Result<&'a mut zshmmut, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1982,8 +2186,8 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { } } -// Tuple -macro_rules! impl_tuple { +// Tuple (a, b) +macro_rules! impl_tuple2 { ($t:expr) => {{ let (a, b) = $t; @@ -2004,6 +2208,7 @@ macro_rules! 
impl_tuple { ZBytes::new(buffer) }}; } + impl Serialize<(A, B)> for ZSerde where A: Into, @@ -2012,7 +2217,7 @@ where type Output = ZBytes; fn serialize(self, t: (A, B)) -> Self::Output { - impl_tuple!(t) + impl_tuple2!(t) } } @@ -2024,7 +2229,7 @@ where type Output = ZBytes; fn serialize(self, t: &(A, B)) -> Self::Output { - impl_tuple!(t) + impl_tuple2!(t) } } @@ -2038,40 +2243,60 @@ where } } -impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde +#[derive(Debug)] +pub enum ZReadOrDeserializeErrorTuple2 where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, { - type Input = &'s ZBytes; - type Error = ZError; + One(ZReadOrDeserializeError), + Two(ZReadOrDeserializeError), +} - fn deserialize(self, bytes: Self::Input) -> Result<(A, B), Self::Error> { +impl Deserialize<(A, B)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple2; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = bytes.0.reader(); - let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let abuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple2::One(ZReadOrDeserializeError::Read(e)))?; let apld = ZBytes::new(abuf); + let a = A::try_from(apld).map_err(|e| { + ZReadOrDeserializeErrorTuple2::One(ZReadOrDeserializeError::Deserialize(e)) + })?; - let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let bbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple2::Two(ZReadOrDeserializeError::Read(e)))?; let bpld = ZBytes::new(bbuf); + let b = B::try_from(bpld).map_err(|e| { + ZReadOrDeserializeErrorTuple2::Two(ZReadOrDeserializeError::Deserialize(e)) + })?; - let a = A::try_from(apld).map_err(|e| 
zerror!("{:?}", e))?; - let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, { - type Error = ZError; + type Error = ZReadOrDeserializeErrorTuple2; fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) @@ -2080,12 +2305,12 @@ where impl TryFrom<&ZBytes> for (A, B) where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, { - type Error = ZError; + type Error = ZReadOrDeserializeErrorTuple2; fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) @@ -2094,12 +2319,471 @@ where impl TryFrom<&mut ZBytes> for (A, B) where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, { - type Error = ZError; + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Tuple (a, b, c) +macro_rules! impl_tuple3 { + ($t:expr) => {{ + let (a, b, c) = $t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); + let cpld: ZBytes = c.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. 
+ unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + codec.write(&mut writer, &cpld.0).unwrap_unchecked(); + } + + ZBytes::new(buffer) + }}; +} + +impl Serialize<(A, B, C)> for ZSerde +where + A: Into, + B: Into, + C: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: (A, B, C)) -> Self::Output { + impl_tuple3!(t) + } +} + +impl Serialize<&(A, B, C)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, + for<'b> &'b C: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &(A, B, C)) -> Self::Output { + impl_tuple3!(t) + } +} + +impl From<(A, B, C)> for ZBytes +where + A: Into, + B: Into, + C: Into, +{ + fn from(value: (A, B, C)) -> Self { + ZSerde.serialize(value) + } +} + +#[derive(Debug)] +pub enum ZReadOrDeserializeErrorTuple3 +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + One(ZReadOrDeserializeError), + Two(ZReadOrDeserializeError), + Three(ZReadOrDeserializeError), +} + +impl Deserialize<(A, B, C)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple3; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result<(A, B, C), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = bytes.0.reader(); + + let abuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple3::One(ZReadOrDeserializeError::Read(e)))?; + let apld = ZBytes::new(abuf); + let a = A::try_from(apld).map_err(|e| { + ZReadOrDeserializeErrorTuple3::One(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let bbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple3::Two(ZReadOrDeserializeError::Read(e)))?; + let bpld = ZBytes::new(bbuf); + let b = B::try_from(bpld).map_err(|e| { + 
ZReadOrDeserializeErrorTuple3::Two(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let cbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple3::Three(ZReadOrDeserializeError::Read(e)))?; + let cpld = ZBytes::new(cbuf); + let c = C::try_from(cpld).map_err(|e| { + ZReadOrDeserializeErrorTuple3::Three(ZReadOrDeserializeError::Deserialize(e)) + })?; + + Ok((a, b, c)) + } +} + +impl TryFrom for (A, B, C) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple3; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for (A, B, C) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple3; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for (A, B, C) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple3; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Tuple (a, b, c, d) +macro_rules! impl_tuple4 { + ($t:expr) => {{ + let (a, b, c, d) = $t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); + let cpld: ZBytes = c.into(); + let dpld: ZBytes = d.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. 
+ unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + codec.write(&mut writer, &cpld.0).unwrap_unchecked(); + codec.write(&mut writer, &dpld.0).unwrap_unchecked(); + } + + ZBytes::new(buffer) + }}; +} + +impl Serialize<(A, B, C, D)> for ZSerde +where + A: Into, + B: Into, + C: Into, + D: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: (A, B, C, D)) -> Self::Output { + impl_tuple4!(t) + } +} + +impl Serialize<&(A, B, C, D)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, + for<'b> &'b C: Into, + for<'b> &'b D: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &(A, B, C, D)) -> Self::Output { + impl_tuple4!(t) + } +} + +impl From<(A, B, C, D)> for ZBytes +where + A: Into, + B: Into, + C: Into, + D: Into, +{ + fn from(value: (A, B, C, D)) -> Self { + ZSerde.serialize(value) + } +} + +#[derive(Debug)] +pub enum ZReadOrDeserializeErrorTuple4 +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + One(ZReadOrDeserializeError), + Two(ZReadOrDeserializeError), + Three(ZReadOrDeserializeError), + Four(ZReadOrDeserializeError), +} + +impl Deserialize<(A, B, C, D)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple4; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result<(A, B, C, D), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = bytes.0.reader(); + + let abuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::One(ZReadOrDeserializeError::Read(e)))?; + let apld = ZBytes::new(abuf); + let a = A::try_from(apld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::One(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let bbuf: ZBuf = codec + .read(&mut reader) + 
.map_err(|e| ZReadOrDeserializeErrorTuple4::Two(ZReadOrDeserializeError::Read(e)))?; + let bpld = ZBytes::new(bbuf); + let b = B::try_from(bpld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::Two(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let cbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::Three(ZReadOrDeserializeError::Read(e)))?; + let cpld = ZBytes::new(cbuf); + let c = C::try_from(cpld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::Three(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let dbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::Four(ZReadOrDeserializeError::Read(e)))?; + let dpld = ZBytes::new(dbuf); + let d = D::try_from(dpld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::Four(ZReadOrDeserializeError::Deserialize(e)) + })?; + + Ok((a, b, c, d)) + } +} + +impl TryFrom for (A, B, C, D) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple4; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for (A, B, C, D) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple4; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for (A, B, C, D) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple4; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// HashMap +impl Serialize> for ZSerde +where + A: Into, + B: Into, +{ + type Output = ZBytes; + + fn serialize(self, mut t: HashMap) -> Self::Output { + ZBytes::from_iter(t.drain()) + } +} + +impl 
Serialize<&HashMap> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &HashMap) -> Self::Output { + ZBytes::from_iter(t.iter()) + } +} + +impl From> for ZBytes +where + A: Into, + B: Into, +{ + fn from(value: HashMap) -> Self { + ZSerde.serialize(value) + } +} + +impl Deserialize> for ZSerde +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple2; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result, Self::Error> { + let mut hm = HashMap::new(); + for res in bytes.iter::<(A, B)>() { + let (k, v) = res?; + hm.insert(k, v); + } + Ok(hm) + } +} + +impl TryFrom for HashMap +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for HashMap +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for HashMap +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; fn try_from(value: &mut ZBytes) -> Result { ZSerde.deserialize(&*value) @@ -2141,6 +2825,7 @@ mod tests { }; use super::ZBytes; + use crate::bytes::{Deserialize, Serialize, ZSerde}; const NUM: usize = 1_000; @@ -2221,6 +2906,26 @@ mod tests { // WARN: test function body produces stack overflow, so I split it into subroutines #[inline(never)] fn basic() { + let mut rng = rand::thread_rng(); + + // bool + serialize_deserialize!(bool, true); + 
serialize_deserialize!(bool, false); + + // char + serialize_deserialize!(char, char::MAX); + serialize_deserialize!(char, rng.gen::()); + + let a = 'a'; + let bytes = ZSerde.serialize(a); + let s: String = ZSerde.deserialize(&bytes).unwrap(); + assert_eq!(a.to_string(), s); + + let a = String::from("a"); + let bytes = ZSerde.serialize(&a); + let s: char = ZSerde.deserialize(&bytes).unwrap(); + assert_eq!(a, s.to_string()); + // String serialize_deserialize!(String, ""); serialize_deserialize!(String, String::from("abcdef")); @@ -2254,11 +2959,11 @@ mod tests { let i3 = vec![2u8; 64]; println!("Write: {:?}", i1); - writer.serialize(i1).unwrap(); + writer.serialize(i1); println!("Write: {:?}", i2); - writer.serialize(&i2).unwrap(); + writer.serialize(&i2); println!("Write: {:?}", i3); - writer.serialize(&i3).unwrap(); + writer.serialize(&i3); let mut reader = bytes.reader(); let o1: u8 = reader.deserialize().unwrap(); @@ -2278,7 +2983,7 @@ mod tests { // SHM #[cfg(feature = "shared-memory")] - { + fn shm() { // create an SHM backend... 
let backend = PosixShmProviderBackend::builder() .with_size(4096) @@ -2302,6 +3007,8 @@ mod tests { serialize_deserialize!(&zshm, immutable_shm_buf); } + #[cfg(feature = "shared-memory")] + shm(); // Parameters serialize_deserialize!(Parameters, Parameters::from("")); @@ -2320,100 +3027,84 @@ mod tests { (Cow::from("a"), Cow::from("b")) ); - // Iterator - let v: [usize; 5] = [0, 1, 2, 3, 4]; - println!("Serialize:\t{:?}", v); - let p = ZBytes::from_iter(v.iter()); - println!("Deserialize:\t{:?}\n", p); - for (i, t) in p.iter::().enumerate() { - assert_eq!(i, t.unwrap()); - } + fn iterator() { + let v: [usize; 5] = [0, 1, 2, 3, 4]; + println!("Serialize:\t{:?}", v); + let p = ZBytes::from_iter(v.iter()); + println!("Deserialize:\t{:?}\n", p); + for (i, t) in p.iter::().enumerate() { + assert_eq!(i, t.unwrap()); + } - let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; - println!("Serialize:\t{:?}", v); - let p = ZBytes::from_iter(v.drain(..)); - println!("Deserialize:\t{:?}\n", p); - let mut iter = p.iter::<[u8; 4]>(); - assert_eq!(iter.next().unwrap().unwrap(), [0, 1, 2, 3]); - assert_eq!(iter.next().unwrap().unwrap(), [4, 5, 6, 7]); - assert_eq!(iter.next().unwrap().unwrap(), [8, 9, 10, 11]); - assert_eq!(iter.next().unwrap().unwrap(), [12, 13, 14, 15]); - assert!(iter.next().is_none()); - - use std::collections::HashMap; - let mut hm: HashMap = HashMap::new(); - hm.insert(0, 0); - hm.insert(1, 1); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, usize)>().map(Result::unwrap)); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>().map(Result::unwrap)); - assert_eq!(hm, o); 
- - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>().map(Result::unwrap)); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZSlice::from(vec![0u8; 8])); - hm.insert(1, ZSlice::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>().map(Result::unwrap)); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZBuf::from(vec![0u8; 8])); - hm.insert(1, ZBuf::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>().map(Result::unwrap)); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>().map(Result::unwrap)); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(String::from("0"), String::from("a")); - hm.insert(String::from("1"), String::from("b")); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.iter()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(String, String)>().map(Result::unwrap)); - assert_eq!(hm, o); - - let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); - hm.insert(Cow::from("0"), Cow::from("a")); - hm.insert(Cow::from("1"), Cow::from("b")); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.iter()); - 
println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter( - p.iter::<(Cow<'static, str>, Cow<'static, str>)>() - .map(Result::unwrap), - ); - assert_eq!(hm, o); + let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; + println!("Serialize:\t{:?}", v); + let p = ZBytes::from_iter(v.drain(..)); + println!("Deserialize:\t{:?}\n", p); + let mut iter = p.iter::<[u8; 4]>(); + assert_eq!(iter.next().unwrap().unwrap(), [0, 1, 2, 3]); + assert_eq!(iter.next().unwrap().unwrap(), [4, 5, 6, 7]); + assert_eq!(iter.next().unwrap().unwrap(), [8, 9, 10, 11]); + assert_eq!(iter.next().unwrap().unwrap(), [12, 13, 14, 15]); + assert!(iter.next().is_none()); + } + iterator(); + + fn hashmap() { + use std::collections::HashMap; + let mut hm: HashMap = HashMap::new(); + hm.insert(0, 0); + hm.insert(1, 1); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZSlice::from(vec![0u8; 8])); + hm.insert(1, ZSlice::from(vec![1u8; 16])); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZBuf::from(vec![0u8; 8])); + hm.insert(1, ZBuf::from(vec![1u8; 16])); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(String::from("0"), String::from("a")); + 
hm.insert(String::from("1"), String::from("b")); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); + hm.insert(Cow::from("0"), Cow::from("a")); + hm.insert(Cow::from("1"), Cow::from("b")); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::, Cow>>().unwrap(); + assert_eq!(hm, o); + } + hashmap(); } } diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index ac0c5c5ad2..2b6cee2b23 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -23,13 +23,13 @@ use super::bytes::ZBytes; /// Default encoding values used by Zenoh. /// -/// An encoding has a similar role to Content-type in HTTP: it indicates, when present, how data should be interpreted by the application. +/// An encoding has a similar role to *Content-type* in HTTP: it indicates, when present, how data should be interpreted by the application. /// /// Please note the Zenoh protocol does not impose any encoding value nor it operates on it. /// It can be seen as some optional metadata that is carried over by Zenoh in such a way the application may perform different operations depending on the encoding value. /// /// A set of associated constants are provided to cover the most common encodings for user convenience. -/// This is parcticular useful in helping Zenoh to perform additional network optimizations. +/// This is parcticular useful in helping Zenoh to perform additional wire-level optimizations. /// /// # Examples /// @@ -85,50 +85,145 @@ impl Encoding { /// Just some bytes. /// /// Constant alias for string: `"zenoh/bytes"`. + /// + /// Usually used for types: `Vec`, `&[u8]`, `[u8; N]`, `Cow<[u8]>`. 
pub const ZENOH_BYTES: Encoding = Self(zenoh_protocol::core::Encoding { id: 0, schema: None, }); - /// A VLE-encoded signed little-endian integer. Either 8bit, 16bit, 32bit, or 64bit. Binary representation uses two's complement. + /// A VLE-encoded signed little-endian 8bit integer. Binary representation uses two's complement. + /// + /// Constant alias for string: `"zenoh/int8"`. /// - /// Constant alias for string: `"zenoh/int"`. - pub const ZENOH_INT: Encoding = Self(zenoh_protocol::core::Encoding { + /// Usually used for types: `i8`. + pub const ZENOH_INT8: Encoding = Self(zenoh_protocol::core::Encoding { id: 1, schema: None, }); - /// A VLE-encoded little-endian unsigned integer. Either 8bit, 16bit, 32bit, or 64bit. + /// A VLE-encoded signed little-endian 16bit integer. Binary representation uses two's complement. + /// + /// Constant alias for string: `"zenoh/int16"`. /// - /// Constant alias for string: `"zenoh/uint"`. - pub const ZENOH_UINT: Encoding = Self(zenoh_protocol::core::Encoding { + /// Usually used for types: `i16`. + pub const ZENOH_INT16: Encoding = Self(zenoh_protocol::core::Encoding { id: 2, schema: None, }); - /// A VLE-encoded float. Either little-endian 32bit or 64bit. Binary representation uses *IEEE 754-2008* *binary32* or *binary64*, respectively. + /// A VLE-encoded signed little-endian 32bit integer. Binary representation uses two's complement. /// - /// Constant alias for string: `"zenoh/float"`. - pub const ZENOH_FLOAT: Encoding = Self(zenoh_protocol::core::Encoding { + /// Constant alias for string: `"zenoh/int32"`. + /// + /// Usually used for types: `i32`. + pub const ZENOH_INT32: Encoding = Self(zenoh_protocol::core::Encoding { id: 3, schema: None, }); + /// A VLE-encoded signed little-endian 64bit integer. Binary representation uses two's complement. + /// + /// Constant alias for string: `"zenoh/int64"`. + /// + /// Usually used for types: `i64`. 
+ pub const ZENOH_INT64: Encoding = Self(zenoh_protocol::core::Encoding { + id: 4, + schema: None, + }); + /// A VLE-encoded signed little-endian 128bit integer. Binary representation uses two's complement. + /// + /// Constant alias for string: `"zenoh/int128"`. + /// + /// Usually used for types: `i128`. + pub const ZENOH_INT128: Encoding = Self(zenoh_protocol::core::Encoding { + id: 5, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 8bit integer. + /// + /// Constant alias for string: `"zenoh/uint8"`. + /// + /// Usually used for types: `u8`. + pub const ZENOH_UINT8: Encoding = Self(zenoh_protocol::core::Encoding { + id: 6, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 16bit integer. + /// + /// Constant alias for string: `"zenoh/uint16"`. + /// + /// Usually used for types: `u16`. + pub const ZENOH_UINT16: Encoding = Self(zenoh_protocol::core::Encoding { + id: 7, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 32bit integer. + /// + /// Constant alias for string: `"zenoh/uint32"`. + /// + /// Usually used for types: `u32`. + pub const ZENOH_UINT32: Encoding = Self(zenoh_protocol::core::Encoding { + id: 8, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 64bit integer. + /// + /// Constant alias for string: `"zenoh/uint64"`. + /// + /// Usually used for types: `u64`. + pub const ZENOH_UINT64: Encoding = Self(zenoh_protocol::core::Encoding { + id: 9, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 128bit integer. + /// + /// Constant alias for string: `"zenoh/uint128"`. + /// + /// Usually used for types: `u128`. + pub const ZENOH_UINT128: Encoding = Self(zenoh_protocol::core::Encoding { + id: 10, + schema: None, + }); + /// A VLE-encoded 32bit float. Binary representation uses *IEEE 754-2008* *binary32* . + /// + /// Constant alias for string: `"zenoh/float32"`. + /// + /// Usually used for types: `f32`. 
+ pub const ZENOH_FLOAT32: Encoding = Self(zenoh_protocol::core::Encoding { + id: 11, + schema: None, + }); + /// A VLE-encoded 64bit float. Binary representation uses *IEEE 754-2008* *binary64*. + /// + /// Constant alias for string: `"zenoh/float64"`. + /// + /// Usually used for types: `f64`. + pub const ZENOH_FLOAT64: Encoding = Self(zenoh_protocol::core::Encoding { + id: 12, + schema: None, + }); /// A boolean. `0` is `false`, `1` is `true`. Other values are invalid. /// /// Constant alias for string: `"zenoh/bool"`. + /// + /// Usually used for types: `bool`. pub const ZENOH_BOOL: Encoding = Self(zenoh_protocol::core::Encoding { - id: 4, + id: 13, schema: None, }); /// A UTF-8 string. /// /// Constant alias for string: `"zenoh/string"`. + /// + /// Usually used for types: `String`, `&str`, `Cow`, `char`. pub const ZENOH_STRING: Encoding = Self(zenoh_protocol::core::Encoding { - id: 5, + id: 14, schema: None, }); /// A zenoh error. /// /// Constant alias for string: `"zenoh/error"`. + /// + /// Usually used for types: `ReplyError`. pub const ZENOH_ERROR: Encoding = Self(zenoh_protocol::core::Encoding { - id: 6, + id: 15, schema: None, }); @@ -137,63 +232,63 @@ impl Encoding { /// /// Constant alias for string: `"application/octet-stream"`. pub const APPLICATION_OCTET_STREAM: Encoding = Self(zenoh_protocol::core::Encoding { - id: 7, + id: 16, schema: None, }); /// A textual file. /// /// Constant alias for string: `"text/plain"`. pub const TEXT_PLAIN: Encoding = Self(zenoh_protocol::core::Encoding { - id: 8, + id: 17, schema: None, }); /// JSON data intended to be consumed by an application. /// /// Constant alias for string: `"application/json"`. pub const APPLICATION_JSON: Encoding = Self(zenoh_protocol::core::Encoding { - id: 9, + id: 18, schema: None, }); /// JSON data intended to be human readable. /// /// Constant alias for string: `"text/json"`. 
pub const TEXT_JSON: Encoding = Self(zenoh_protocol::core::Encoding { - id: 10, + id: 19, schema: None, }); /// A Common Data Representation (CDR)-encoded data. /// /// Constant alias for string: `"application/cdr"`. pub const APPLICATION_CDR: Encoding = Self(zenoh_protocol::core::Encoding { - id: 11, + id: 20, schema: None, }); /// A Concise Binary Object Representation (CBOR)-encoded data. /// /// Constant alias for string: `"application/cbor"`. pub const APPLICATION_CBOR: Encoding = Self(zenoh_protocol::core::Encoding { - id: 12, + id: 21, schema: None, }); /// YAML data intended to be consumed by an application. /// /// Constant alias for string: `"application/yaml"`. pub const APPLICATION_YAML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 13, + id: 22, schema: None, }); /// YAML data intended to be human readable. /// /// Constant alias for string: `"text/yaml"`. pub const TEXT_YAML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 14, + id: 23, schema: None, }); /// JSON5 encoded data that are human readable. /// /// Constant alias for string: `"text/json5"`. pub const TEXT_JSON5: Encoding = Self(zenoh_protocol::core::Encoding { - id: 15, + id: 24, schema: None, }); /// A Python object serialized using [pickle](https://docs.python.org/3/library/pickle.html). @@ -201,408 +296,426 @@ impl Encoding { /// Constant alias for string: `"application/python-serialized-object"`. pub const APPLICATION_PYTHON_SERIALIZED_OBJECT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 16, + id: 25, schema: None, }); /// An application-specific protobuf-encoded data. /// /// Constant alias for string: `"application/protobuf"`. pub const APPLICATION_PROTOBUF: Encoding = Self(zenoh_protocol::core::Encoding { - id: 17, + id: 26, schema: None, }); /// A Java serialized object. /// /// Constant alias for string: `"application/java-serialized-object"`. 
pub const APPLICATION_JAVA_SERIALIZED_OBJECT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 18, + id: 27, schema: None, }); /// An [openmetrics](https://github.com/OpenObservability/OpenMetrics) data, common used by [Prometheus](https://prometheus.io/). /// /// Constant alias for string: `"application/openmetrics-text"`. pub const APPLICATION_OPENMETRICS_TEXT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 19, + id: 28, schema: None, }); /// A Portable Network Graphics (PNG) image. /// /// Constant alias for string: `"image/png"`. pub const IMAGE_PNG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 20, + id: 29, schema: None, }); /// A Joint Photographic Experts Group (JPEG) image. /// /// Constant alias for string: `"image/jpeg"`. pub const IMAGE_JPEG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 21, + id: 30, schema: None, }); /// A Graphics Interchange Format (GIF) image. /// /// Constant alias for string: `"image/gif"`. pub const IMAGE_GIF: Encoding = Self(zenoh_protocol::core::Encoding { - id: 22, + id: 31, schema: None, }); /// A BitMap (BMP) image. /// /// Constant alias for string: `"image/bmp"`. pub const IMAGE_BMP: Encoding = Self(zenoh_protocol::core::Encoding { - id: 23, + id: 32, schema: None, }); /// A Web Portable (WebP) image. /// /// Constant alias for string: `"image/webp"`. pub const IMAGE_WEBP: Encoding = Self(zenoh_protocol::core::Encoding { - id: 24, + id: 33, schema: None, }); /// An XML file intended to be consumed by an application.. /// /// Constant alias for string: `"application/xml"`. pub const APPLICATION_XML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 25, + id: 34, schema: None, }); /// An encoded a list of tuples, each consisting of a name and a value. /// /// Constant alias for string: `"application/x-www-form-urlencoded"`. pub const APPLICATION_X_WWW_FORM_URLENCODED: Encoding = Self(zenoh_protocol::core::Encoding { - id: 26, + id: 35, schema: None, }); /// An HTML file. 
/// /// Constant alias for string: `"text/html"`. pub const TEXT_HTML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 27, + id: 36, schema: None, }); /// An XML file that is human readable. /// /// Constant alias for string: `"text/xml"`. pub const TEXT_XML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 28, + id: 37, schema: None, }); /// A CSS file. /// /// Constant alias for string: `"text/css"`. pub const TEXT_CSS: Encoding = Self(zenoh_protocol::core::Encoding { - id: 29, + id: 38, schema: None, }); /// A JavaScript file. /// /// Constant alias for string: `"text/javascript"`. pub const TEXT_JAVASCRIPT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 30, + id: 39, schema: None, }); /// A MarkDown file. /// /// Constant alias for string: `"text/markdown"`. pub const TEXT_MARKDOWN: Encoding = Self(zenoh_protocol::core::Encoding { - id: 31, + id: 40, schema: None, }); /// A CSV file. /// /// Constant alias for string: `"text/csv"`. pub const TEXT_CSV: Encoding = Self(zenoh_protocol::core::Encoding { - id: 32, + id: 41, schema: None, }); /// An application-specific SQL query. /// /// Constant alias for string: `"application/sql"`. pub const APPLICATION_SQL: Encoding = Self(zenoh_protocol::core::Encoding { - id: 33, + id: 42, schema: None, }); /// Constrained Application Protocol (CoAP) data intended for CoAP-to-HTTP and HTTP-to-CoAP proxies. /// /// Constant alias for string: `"application/coap-payload"`. pub const APPLICATION_COAP_PAYLOAD: Encoding = Self(zenoh_protocol::core::Encoding { - id: 34, + id: 43, schema: None, }); /// Defines a JSON document structure for expressing a sequence of operations to apply to a JSON document. /// /// Constant alias for string: `"application/json-patch+json"`. pub const APPLICATION_JSON_PATCH_JSON: Encoding = Self(zenoh_protocol::core::Encoding { - id: 35, + id: 44, schema: None, }); /// A JSON text sequence consists of any number of JSON texts, all encoded in UTF-8. 
/// /// Constant alias for string: `"application/json-seq"`. pub const APPLICATION_JSON_SEQ: Encoding = Self(zenoh_protocol::core::Encoding { - id: 36, + id: 45, schema: None, }); /// A JSONPath defines a string syntax for selecting and extracting JSON values from within a given JSON value. /// /// Constant alias for string: `"application/jsonpath"`. pub const APPLICATION_JSONPATH: Encoding = Self(zenoh_protocol::core::Encoding { - id: 37, + id: 46, schema: None, }); /// A JSON Web Token (JWT). /// /// Constant alias for string: `"application/jwt"`. pub const APPLICATION_JWT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 38, + id: 47, schema: None, }); /// An application-specific MPEG-4 encoded data, either audio or video. /// /// Constant alias for string: `"application/mp4"`. pub const APPLICATION_MP4: Encoding = Self(zenoh_protocol::core::Encoding { - id: 39, + id: 48, schema: None, }); /// A SOAP 1.2 message serialized as XML 1.0. /// /// Constant alias for string: `"application/soap+xml"`. pub const APPLICATION_SOAP_XML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 40, + id: 49, schema: None, }); /// A YANG-encoded data commonly used by the Network Configuration Protocol (NETCONF). /// /// Constant alias for string: `"application/yang"`. pub const APPLICATION_YANG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 41, + id: 50, schema: None, }); /// A MPEG-4 Advanced Audio Coding (AAC) media. /// /// Constant alias for string: `"audio/aac"`. pub const AUDIO_AAC: Encoding = Self(zenoh_protocol::core::Encoding { - id: 42, + id: 51, schema: None, }); /// A Free Lossless Audio Codec (FLAC) media. /// /// Constant alias for string: `"audio/flac"`. pub const AUDIO_FLAC: Encoding = Self(zenoh_protocol::core::Encoding { - id: 43, + id: 52, schema: None, }); /// An audio codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. /// /// Constant alias for string: `"audio/mp4"`. 
pub const AUDIO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { - id: 44, + id: 53, schema: None, }); /// An Ogg-encapsulated audio stream. /// /// Constant alias for string: `"audio/ogg"`. pub const AUDIO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 45, + id: 54, schema: None, }); /// A Vorbis-encoded audio stream. /// /// Constant alias for string: `"audio/vorbis"`. pub const AUDIO_VORBIS: Encoding = Self(zenoh_protocol::core::Encoding { - id: 46, + id: 55, schema: None, }); /// A h261-encoded video stream. /// /// Constant alias for string: `"video/h261"`. pub const VIDEO_H261: Encoding = Self(zenoh_protocol::core::Encoding { - id: 47, + id: 56, schema: None, }); /// A h263-encoded video stream. /// /// Constant alias for string: `"video/h263"`. pub const VIDEO_H263: Encoding = Self(zenoh_protocol::core::Encoding { - id: 48, + id: 57, schema: None, }); /// A h264-encoded video stream. /// /// Constant alias for string: `"video/h264"`. pub const VIDEO_H264: Encoding = Self(zenoh_protocol::core::Encoding { - id: 49, + id: 58, schema: None, }); /// A h265-encoded video stream. /// /// Constant alias for string: `"video/h265"`. pub const VIDEO_H265: Encoding = Self(zenoh_protocol::core::Encoding { - id: 50, + id: 59, schema: None, }); /// A h266-encoded video stream. /// /// Constant alias for string: `"video/h266"`. pub const VIDEO_H266: Encoding = Self(zenoh_protocol::core::Encoding { - id: 51, + id: 60, schema: None, }); /// A video codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. /// /// Constant alias for string: `"video/mp4"`. pub const VIDEO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { - id: 52, + id: 61, schema: None, }); /// An Ogg-encapsulated video stream. /// /// Constant alias for string: `"video/ogg"`. pub const VIDEO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 53, + id: 62, schema: None, }); /// An uncompressed, studio-quality video stream. 
/// /// Constant alias for string: `"video/raw"`. pub const VIDEO_RAW: Encoding = Self(zenoh_protocol::core::Encoding { - id: 54, + id: 63, schema: None, }); /// A VP8-encoded video stream. /// /// Constant alias for string: `"video/vp8"`. pub const VIDEO_VP8: Encoding = Self(zenoh_protocol::core::Encoding { - id: 55, + id: 64, schema: None, }); /// A VP9-encoded video stream. /// /// Constant alias for string: `"video/vp9"`. pub const VIDEO_VP9: Encoding = Self(zenoh_protocol::core::Encoding { - id: 56, + id: 65, schema: None, }); const ID_TO_STR: phf::Map = phf_map! { 0u16 => "zenoh/bytes", - 1u16 => "zenoh/int", - 2u16 => "zenoh/uint", - 3u16 => "zenoh/float", - 4u16 => "zenoh/bool", - 5u16 => "zenoh/string", - 6u16 => "zenoh/error", - 7u16 => "application/octet-stream", - 8u16 => "text/plain", - 9u16 => "application/json", - 10u16 => "text/json", - 11u16 => "application/cdr", - 12u16 => "application/cbor", - 13u16 => "application/yaml", - 14u16 => "text/yaml", - 15u16 => "text/json5", - 16u16 => "application/python-serialized-object", - 17u16 => "application/protobuf", - 18u16 => "application/java-serialized-object", - 19u16 => "application/openmetrics-text", - 20u16 => "image/png", - 21u16 => "image/jpeg", - 22u16 => "image/gif", - 23u16 => "image/bmp", - 24u16 => "image/webp", - 25u16 => "application/xml", - 26u16 => "application/x-www-form-urlencoded", - 27u16 => "text/html", - 28u16 => "text/xml", - 29u16 => "text/css", - 30u16 => "text/javascript", - 31u16 => "text/markdown", - 32u16 => "text/csv", - 33u16 => "application/sql", - 34u16 => "application/coap-payload", - 35u16 => "application/json-patch+json", - 36u16 => "application/json-seq", - 37u16 => "application/jsonpath", - 38u16 => "application/jwt", - 39u16 => "application/mp4", - 40u16 => "application/soap+xml", - 41u16 => "application/yang", - 42u16 => "audio/aac", - 43u16 => "audio/flac", - 44u16 => "audio/mp4", - 45u16 => "audio/ogg", - 46u16 => "audio/vorbis", - 47u16 => "video/h261", - 48u16 => 
"video/h263", - 49u16 => "video/h264", - 50u16 => "video/h265", - 51u16 => "video/h266", - 52u16 => "video/mp4", - 53u16 => "video/ogg", - 54u16 => "video/raw", - 55u16 => "video/vp8", - 56u16 => "video/vp9", + 1u16 => "zenoh/int8", + 2u16 => "zenoh/int16", + 3u16 => "zenoh/int32", + 4u16 => "zenoh/int64", + 5u16 => "zenoh/int128", + 6u16 => "zenoh/uint8", + 7u16 => "zenoh/uint16", + 8u16 => "zenoh/uint32", + 9u16 => "zenoh/uint64", + 10u16 => "zenoh/uint128", + 11u16 => "zenoh/float32", + 12u16 => "zenoh/float64", + 13u16 => "zenoh/bool", + 14u16 => "zenoh/string", + 15u16 => "zenoh/error", + 16u16 => "application/octet-stream", + 17u16 => "text/plain", + 18u16 => "application/json", + 19u16 => "text/json", + 20u16 => "application/cdr", + 21u16 => "application/cbor", + 22u16 => "application/yaml", + 23u16 => "text/yaml", + 24u16 => "text/json5", + 25u16 => "application/python-serialized-object", + 26u16 => "application/protobuf", + 27u16 => "application/java-serialized-object", + 28u16 => "application/openmetrics-text", + 29u16 => "image/png", + 30u16 => "image/jpeg", + 31u16 => "image/gif", + 32u16 => "image/bmp", + 33u16 => "image/webp", + 34u16 => "application/xml", + 35u16 => "application/x-www-form-urlencoded", + 36u16 => "text/html", + 37u16 => "text/xml", + 38u16 => "text/css", + 39u16 => "text/javascript", + 40u16 => "text/markdown", + 41u16 => "text/csv", + 42u16 => "application/sql", + 43u16 => "application/coap-payload", + 44u16 => "application/json-patch+json", + 45u16 => "application/json-seq", + 46u16 => "application/jsonpath", + 47u16 => "application/jwt", + 48u16 => "application/mp4", + 49u16 => "application/soap+xml", + 50u16 => "application/yang", + 51u16 => "audio/aac", + 52u16 => "audio/flac", + 53u16 => "audio/mp4", + 54u16 => "audio/ogg", + 55u16 => "audio/vorbis", + 56u16 => "video/h261", + 57u16 => "video/h263", + 58u16 => "video/h264", + 59u16 => "video/h265", + 60u16 => "video/h266", + 61u16 => "video/mp4", + 62u16 => "video/ogg", + 63u16 
=> "video/raw", + 64u16 => "video/vp8", + 65u16 => "video/vp9", }; const STR_TO_ID: phf::Map<&'static str, EncodingId> = phf_map! { "zenoh/bytes" => 0u16, - "zenoh/int" => 1u16, - "zenoh/uint" => 2u16, - "zenoh/float" => 3u16, - "zenoh/bool" => 4u16, - "zenoh/string" => 5u16, - "zenoh/error" => 6u16, - "application/octet-stream" => 7u16, - "text/plain" => 8u16, - "application/json" => 9u16, - "text/json" => 10u16, - "application/cdr" => 11u16, - "application/cbor" => 12u16, - "application/yaml" => 13u16, - "text/yaml" => 14u16, - "text/json5" => 15u16, - "application/python-serialized-object" => 16u16, - "application/protobuf" => 17u16, - "application/java-serialized-object" => 18u16, - "application/openmetrics-text" => 19u16, - "image/png" => 20u16, - "image/jpeg" => 21u16, - "image/gif" => 22u16, - "image/bmp" => 23u16, - "image/webp" => 24u16, - "application/xml" => 25u16, - "application/x-www-form-urlencoded" => 26u16, - "text/html" => 27u16, - "text/xml" => 28u16, - "text/css" => 29u16, - "text/javascript" => 30u16, - "text/markdown" => 31u16, - "text/csv" => 32u16, - "application/sql" => 33u16, - "application/coap-payload" => 34u16, - "application/json-patch+json" => 35u16, - "application/json-seq" => 36u16, - "application/jsonpath" => 37u16, - "application/jwt" => 38u16, - "application/mp4" => 39u16, - "application/soap+xml" => 40u16, - "application/yang" => 41u16, - "audio/aac" => 42u16, - "audio/flac" => 43u16, - "audio/mp4" => 44u16, - "audio/ogg" => 45u16, - "audio/vorbis" => 46u16, - "video/h261" => 47u16, - "video/h263" => 48u16, - "video/h264" => 49u16, - "video/h265" => 50u16, - "video/h266" => 51u16, - "video/mp4" => 52u16, - "video/ogg" => 53u16, - "video/raw" => 54u16, - "video/vp8" => 55u16, - "video/vp9" => 56u16, + "zenoh/int8" => 1u16, + "zenoh/int16" => 2u16, + "zenoh/int32" => 3u16, + "zenoh/int64" => 4u16, + "zenoh/int128" => 5u16, + "zenoh/uint8" => 6u16, + "zenoh/uint16" => 7u16, + "zenoh/uint32" => 8u16, + "zenoh/uint64" => 9u16, + 
"zenoh/uint128" => 10u16, + "zenoh/float32" => 11u16, + "zenoh/float64" => 12u16, + "zenoh/bool" => 13u16, + "zenoh/string" => 14u16, + "zenoh/error" => 15u16, + "application/octet-stream" => 16u16, + "text/plain" => 17u16, + "application/json" => 18u16, + "text/json" => 19u16, + "application/cdr" => 20u16, + "application/cbor" => 21u16, + "application/yaml" => 22u16, + "text/yaml" => 23u16, + "text/json5" => 24u16, + "application/python-serialized-object" => 25u16, + "application/protobuf" => 26u16, + "application/java-serialized-object" => 27u16, + "application/openmetrics-text" => 28u16, + "image/png" => 29u16, + "image/jpeg" => 30u16, + "image/gif" => 31u16, + "image/bmp" => 32u16, + "image/webp" => 33u16, + "application/xml" => 34u16, + "application/x-www-form-urlencoded" => 35u16, + "text/html" => 36u16, + "text/xml" => 37u16, + "text/css" => 38u16, + "text/javascript" => 39u16, + "text/markdown" => 40u16, + "text/csv" => 41u16, + "application/sql" => 42u16, + "application/coap-payload" => 43u16, + "application/json-patch+json" => 44u16, + "application/json-seq" => 45u16, + "application/jsonpath" => 46u16, + "application/jwt" => 47u16, + "application/mp4" => 48u16, + "application/soap+xml" => 49u16, + "application/yang" => 50u16, + "audio/aac" => 51u16, + "audio/flac" => 52u16, + "audio/mp4" => 53u16, + "audio/ogg" => 54u16, + "audio/vorbis" => 55u16, + "video/h261" => 56u16, + "video/h263" => 57u16, + "video/h264" => 58u16, + "video/h265" => 59u16, + "video/h266" => 60u16, + "video/mp4" => 61u16, + "video/ogg" => 62u16, + "video/raw" => 63u16, + "video/vp8" => 64u16, + "video/vp9" => 65u16, }; /// The default [`Encoding`] is [`ZENOH_BYTES`](Encoding::ZENOH_BYTES). 
@@ -768,53 +881,79 @@ impl EncodingMapping for Cow<'_, str> { // Zenoh unsigned integers impl EncodingMapping for u8 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; + const ENCODING: Encoding = Encoding::ZENOH_UINT8; } impl EncodingMapping for u16 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; + const ENCODING: Encoding = Encoding::ZENOH_UINT16; } impl EncodingMapping for u32 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; + const ENCODING: Encoding = Encoding::ZENOH_UINT32; } impl EncodingMapping for u64 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; + const ENCODING: Encoding = Encoding::ZENOH_UINT64; +} + +impl EncodingMapping for u128 { + const ENCODING: Encoding = Encoding::ZENOH_UINT128; } impl EncodingMapping for usize { - const ENCODING: Encoding = Encoding::ZENOH_UINT; + #[cfg(target_pointer_width = "8")] + const ENCODING: Encoding = Encoding::ZENOH_UINT8; + #[cfg(target_pointer_width = "16")] + const ENCODING: Encoding = Encoding::ZENOH_UINT16; + #[cfg(target_pointer_width = "32")] + const ENCODING: Encoding = Encoding::ZENOH_UINT32; + #[cfg(target_pointer_width = "64")] + const ENCODING: Encoding = Encoding::ZENOH_UINT64; + #[cfg(target_pointer_width = "128")] + const ENCODING: Encoding = Encoding::ZENOH_UINT128; } // Zenoh signed integers impl EncodingMapping for i8 { - const ENCODING: Encoding = Encoding::ZENOH_INT; + const ENCODING: Encoding = Encoding::ZENOH_INT8; } impl EncodingMapping for i16 { - const ENCODING: Encoding = Encoding::ZENOH_INT; + const ENCODING: Encoding = Encoding::ZENOH_INT16; } impl EncodingMapping for i32 { - const ENCODING: Encoding = Encoding::ZENOH_INT; + const ENCODING: Encoding = Encoding::ZENOH_INT32; } impl EncodingMapping for i64 { - const ENCODING: Encoding = Encoding::ZENOH_INT; + const ENCODING: Encoding = Encoding::ZENOH_INT64; +} + +impl EncodingMapping for i128 { + const ENCODING: Encoding = Encoding::ZENOH_INT128; } impl EncodingMapping for isize { - const ENCODING: Encoding = 
Encoding::ZENOH_INT; + #[cfg(target_pointer_width = "8")] + const ENCODING: Encoding = Encoding::ZENOH_INT8; + #[cfg(target_pointer_width = "16")] + const ENCODING: Encoding = Encoding::ZENOH_INT16; + #[cfg(target_pointer_width = "32")] + const ENCODING: Encoding = Encoding::ZENOH_INT32; + #[cfg(target_pointer_width = "64")] + const ENCODING: Encoding = Encoding::ZENOH_INT64; + #[cfg(target_pointer_width = "128")] + const ENCODING: Encoding = Encoding::ZENOH_INT128; } // Zenoh floats impl EncodingMapping for f32 { - const ENCODING: Encoding = Encoding::ZENOH_FLOAT; + const ENCODING: Encoding = Encoding::ZENOH_FLOAT32; } impl EncodingMapping for f64 { - const ENCODING: Encoding = Encoding::ZENOH_FLOAT; + const ENCODING: Encoding = Encoding::ZENOH_FLOAT64; } // Zenoh bool @@ -841,7 +980,7 @@ impl EncodingMapping for serde_pickle::Value { impl Encoding { #[zenoh_macros::internal] - pub fn id(&self) -> u16 { + pub fn id(&self) -> EncodingId { self.0.id } #[zenoh_macros::internal] @@ -849,7 +988,7 @@ impl Encoding { self.0.schema.as_ref() } #[zenoh_macros::internal] - pub fn new(id: u16, schema: Option) -> Self { + pub fn new(id: EncodingId, schema: Option) -> Self { Encoding(zenoh_protocol::core::Encoding { id, schema }) } } From 8f94b089aeb98570547c93cf92bcf2be794d802f Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Tue, 25 Jun 2024 08:52:35 +0000 Subject: [PATCH 492/598] fix: adminspace encodings (#1182) * fix(adminspace-encoding): explicit set of encoding for adminspace replies Signed-off-by: Gabriele Baldoni * fix: using TEXT_PLAIN with simple strings Signed-off-by: Gabriele Baldoni * fix: better matching of encodings in REST plugin Signed-off-by: Gabriele Baldoni * chore: addressing comments Signed-off-by: Gabriele Baldoni --------- Signed-off-by: Gabriele Baldoni --- plugins/zenoh-plugin-rest/src/lib.rs | 11 ++++++++++- zenoh/src/net/runtime/adminspace.rs | 21 +++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git 
a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 107f241a87..a35025e26e 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -77,10 +77,19 @@ fn payload_to_json(payload: &ZBytes, encoding: &Encoding) -> serde_json::Value { &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { payload .deserialize::() - .unwrap_or_else(|_| { + .unwrap_or_else(|e| { + tracing::warn!("Encoding is JSON but data is not JSON, converting to base64, Error: {e:?}"); serde_json::Value::String(base64_encode(&Cow::from(payload))) }) } + &Encoding::TEXT_PLAIN | &Encoding::ZENOH_STRING => serde_json::Value::String( + payload + .deserialize::() + .unwrap_or_else(|e| { + tracing::warn!("Encoding is String but data is not String, converting to base64, Error: {e:?}"); + base64_encode(&Cow::from(payload)) + }), + ), // otherwise convert to JSON string _ => serde_json::Value::String(base64_encode(&Cow::from(payload))), } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 1e5c7e499e..eb010f9037 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -664,7 +664,11 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query.reply(reply_key, metrics).wait() { + if let Err(e) = query + .reply(reply_key, metrics) + .encoding(Encoding::TEXT_PLAIN) + .wait() + { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -681,6 +685,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) + .encoding(Encoding::TEXT_PLAIN) .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); @@ -699,6 +704,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) + .encoding(Encoding::TEXT_PLAIN) .wait() { 
tracing::error!("Error sending AdminSpace reply: {:?}", e); @@ -770,7 +776,11 @@ fn plugins_data(context: &AdminContext, query: Query) { let status = serde_json::to_value(status).unwrap(); match ZBytes::try_from(status) { Ok(zbuf) => { - if let Err(e) = query.reply(key, zbuf).wait() { + if let Err(e) = query + .reply(key, zbuf) + .encoding(Encoding::APPLICATION_JSON) + .wait() + { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -782,8 +792,6 @@ fn plugins_data(context: &AdminContext, query: Query) { #[cfg(feature = "plugins")] fn plugins_status(context: &AdminContext, query: Query) { - use crate::bytes::{Serialize, ZSerde}; - let key_expr = query.key_expr(); let guard = context.runtime.plugins_manager(); let mut root_key = format!( @@ -798,7 +806,8 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { if let Err(e) = query - .reply(key_expr, ZSerde.serialize(plugin.path())) + .reply(key_expr, plugin.path()) + .encoding(Encoding::TEXT_PLAIN) .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); @@ -824,7 +833,7 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(response.key) { match ZBytes::try_from(response.value) { Ok(zbuf) => { - if let Err(e) = query.reply(key_expr, zbuf).wait() { + if let Err(e) = query.reply(key_expr, zbuf).encoding(Encoding::APPLICATION_JSON).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } }, From c4d887767feadddf514d1f5a8223594c3e178f01 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 25 Jun 2024 12:08:07 +0200 Subject: [PATCH 493/598] fix: fix zenoh-shm test requiring "test" feature to compile (#1183) --- commons/zenoh-shm/tests/common/mod.rs | 1 + commons/zenoh-shm/tests/header.rs | 2 +- commons/zenoh-shm/tests/periodic_task.rs | 2 +- commons/zenoh-shm/tests/posix_segment.rs | 2 +- 
commons/zenoh-shm/tests/watchdog.rs | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-shm/tests/common/mod.rs b/commons/zenoh-shm/tests/common/mod.rs index 23f55d8c2a..e1d4222b0e 100644 --- a/commons/zenoh-shm/tests/common/mod.rs +++ b/commons/zenoh-shm/tests/common/mod.rs @@ -84,6 +84,7 @@ impl CpuLoad { Self::new(1000) } + #[cfg(feature = "test")] pub fn optimal_high() -> Self { Self::new(num_cpus::get()) } diff --git a/commons/zenoh-shm/tests/header.rs b/commons/zenoh-shm/tests/header.rs index 1feb06dba9..f417f0b86e 100644 --- a/commons/zenoh-shm/tests/header.rs +++ b/commons/zenoh-shm/tests/header.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // - +#![cfg(feature = "test")] use std::sync::atomic::Ordering::Relaxed; use rand::Rng; diff --git a/commons/zenoh-shm/tests/periodic_task.rs b/commons/zenoh-shm/tests/periodic_task.rs index 701fe742ca..7465c9bcd3 100644 --- a/commons/zenoh-shm/tests/periodic_task.rs +++ b/commons/zenoh-shm/tests/periodic_task.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // - +#![cfg(feature = "test")] use std::{ sync::{Arc, Mutex}, time::{Duration, Instant}, diff --git a/commons/zenoh-shm/tests/posix_segment.rs b/commons/zenoh-shm/tests/posix_segment.rs index 907f70cc4e..094ae40a85 100644 --- a/commons/zenoh-shm/tests/posix_segment.rs +++ b/commons/zenoh-shm/tests/posix_segment.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // - +#![cfg(feature = "test")] use std::{fmt::Display, slice}; use zenoh_shm::posix_shm::segment::Segment; diff --git a/commons/zenoh-shm/tests/watchdog.rs b/commons/zenoh-shm/tests/watchdog.rs index 7f55e4a92d..fbdf672f87 100644 --- a/commons/zenoh-shm/tests/watchdog.rs +++ b/commons/zenoh-shm/tests/watchdog.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // - +#![cfg(feature = "test")] use std::{ sync::{atomic::AtomicBool, Arc}, time::Duration, From de0a7c374bb96dd00cdb850ca38d0b6980f79ec0 Mon Sep 17 00:00:00 2001 
From: Luca Cominardi Date: Tue, 25 Jun 2024 14:39:36 +0200 Subject: [PATCH 494/598] Update rust version in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index af08db7260..9a1a0dd35f 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ Then you can start run `zenohd`. > [!WARNING] > Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in maintaining compatibility between the various git repositories in the Zenoh project. -Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: +Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.75.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: ```bash $ rustup update From 2b16fed854bbcbcfcbdef58ba704c9fa6585d3cc Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Tue, 25 Jun 2024 20:40:52 +0800 Subject: [PATCH 495/598] Fix README format (#1186) * Fix the README format. Signed-off-by: ChenYing Kuo * Fix some out-dated contents. 
Signed-off-by: ChenYing Kuo --------- Signed-off-by: ChenYing Kuo --- README.md | 174 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 91 insertions(+), 83 deletions(-) diff --git a/README.md b/README.md index 9a1a0dd35f..9a6216d3ef 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) # Eclipse Zenoh + The Eclipse Zenoh: Zero Overhead Pub/sub, Store/Query and Compute. Zenoh (pronounce _/zeno/_) unifies data in motion, data at rest and computations. It carefully blends traditional pub/sub with geo-distributed storages, queries and computations, while retaining a level of time and space efficiency that is well beyond any of the mainstream stacks. @@ -15,21 +16,22 @@ Zenoh (pronounce _/zeno/_) unifies data in motion, data at rest and computations Check the website [zenoh.io](http://zenoh.io) and the [roadmap](https://github.com/eclipse-zenoh/roadmap) for more detailed information. ------------------------------- + ## Getting Started Zenoh is extremely easy to learn, the best place to master the fundamentals is our [getting started guide](https://zenoh.io/docs/getting-started/first-app/). ------------------------------- + ## How to install it To install the latest release of the Zenoh router (`zenohd`) and its default plugins (REST API plugin and Storages Manager plugin) you can do as follows: ### Manual installation (all platforms) -All release packages can be downloaded from: - - https://download.eclipse.org/zenoh/zenoh/latest/ +All release packages can be downloaded from [https://download.eclipse.org/zenoh/zenoh/latest/](https://download.eclipse.org/zenoh/zenoh/latest/). -Each subdirectory has the name of the Rust target. See the platforms each target corresponds to on https://doc.rust-lang.org/stable/rustc/platform-support.html +Each subdirectory has the name of the Rust target. 
See the platforms each target corresponds to on [https://doc.rust-lang.org/stable/rustc/platform-support.html](https://doc.rust-lang.org/stable/rustc/platform-support.html). Choose your platform and download the `.zip` file. Unzip it where you want, and run the extracted `zenohd` binary. @@ -43,6 +45,7 @@ echo "deb [trusted=yes] https://download.eclipse.org/zenoh/debian-repo/ /" | sud sudo apt update sudo apt install zenoh ``` + Then you can start run `zenohd`. ### MacOS @@ -53,12 +56,17 @@ Tap our brew package repository and install the `zenoh` formula: brew tap eclipse-zenoh/homebrew-zenoh brew install zenoh ``` + Then you can start run `zenohd`. +------------------------------- + +## Rust API -### Rust API +* [Docs.rs for Zenoh](https://docs.rs/zenoh/latest/zenoh/) ------------------------------- + ## How to build it > [!WARNING] @@ -67,72 +75,72 @@ Then you can start run `zenohd`. Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.75.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: ```bash -$ rustup update +rustup update ``` To build Zenoh, just type the following command after having followed the previous instructions: ```bash -$ cargo build --release --all-targets +cargo build --release --all-targets ``` Zenoh's router is built as `target/release/zenohd`. All the examples are built into the `target/release/examples` directory. They can all work in peer-to-peer, or interconnected via the zenoh router. ------------------------------- -## Quick tests of your build: -**Peer-to-peer tests:** +## Quick tests of your build - - **pub/sub** - - run: `./target/release/examples/z_sub` - - in another shell run: `./target/release/examples/z_put` - - the subscriber should receive the publication. 
+### Peer-to-peer tests - - **get/queryable** - - run: `./target/release/examples/z_queryable` - - in another shell run: `./target/release/examples/z_get` - - the queryable should display the log in its listener, and the get should receive the queryable result. +* **pub/sub** + * run: `./target/release/examples/z_sub` + * in another shell run: `./target/release/examples/z_put` + * the subscriber should receive the publication. -**Routed tests:** +* **get/queryable** + * run: `./target/release/examples/z_queryable` + * in another shell run: `./target/release/examples/z_get` + * the queryable should display the log in its listener, and the get should receive the queryable result. + +### Routed tests > [!NOTE] > **Windows users**: to properly execute the commands below in PowerShell you need to escape `"` characters as `\"`. - - **put/store/get** - - run the Zenoh router with a memory storage: - `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell run: `./target/release/examples/z_put` - - then run `./target/release/examples/z_get` - - the get should receive the stored publication. 
- - - **REST API using `curl` tool** - - run the Zenoh router with a memory storage: - `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell, do a publication via the REST API: - `curl -X PUT -d '"Hello World!"' http://localhost:8000/demo/example/test` - - get it back via the REST API: - `curl http://localhost:8000/demo/example/test` - - - **router admin space via the REST API** - - run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage: - `./target/release/zenohd --adminspace-permissions=rw --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell, get info of the zenoh router via the zenoh admin space: - `curl http://localhost:8000/@/router/local` - - get the volumes of the router (only memory by default): - `curl 'http://localhost:8000/@/router/local/**/volumes/*'` - - get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present): - `curl 'http://localhost:8000/@/router/local/**/storages/*'` - - add another memory storage on `/demo/mystore/**`: - `curl -X PUT -H 'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/router/local/config/plugins/storage_manager/storages/mystore` - - check it has been created: - `curl 'http://localhost:8000/@/router/local/**/storages/*'` - -**Configuration options:** +* **put/store/get** + * run the Zenoh router with a memory storage: + `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` + * in another shell run: `./target/release/examples/z_put` + * then run `./target/release/examples/z_get` + * the get should receive the stored publication. 
+ +* **REST API using `curl` tool** + * run the Zenoh router with a memory storage: + `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` + * in another shell, do a publication via the REST API: + `curl -X PUT -d '"Hello World!"' http://localhost:8000/demo/example/test` + * get it back via the REST API: + `curl http://localhost:8000/demo/example/test` + +* **router admin space via the REST API** + * run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage: + `./target/release/zenohd --adminspace-permissions=rw --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` + * in another shell, get info of the zenoh router via the zenoh admin space: + `curl http://localhost:8000/@/router/local` + * get the volumes of the router (only memory by default): + `curl 'http://localhost:8000/@/router/local/**/volumes/*'` + * get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present): + `curl 'http://localhost:8000/@/router/local/**/storages/*'` + * add another memory storage on `/demo/mystore/**`: + `curl -X PUT -H 'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/router/local/config/plugins/storage_manager/storages/mystore` + * check it has been created: + `curl 'http://localhost:8000/@/router/local/**/storages/*'` + +### Configuration options A Zenoh configuration file can be provided via CLI to all Zenoh examples and the Zenoh router. - * `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options. - +* `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options. 
See other examples of Zenoh usage in [examples/](examples) @@ -140,45 +148,44 @@ See other examples of Zenoh usage in [examples/](examples) > **Zenoh Runtime Configuration**: Starting from version 0.11.0-rc, Zenoh allows for configuring the number of worker threads and other advanced options of the runtime. For guidance on utilizing it, please refer to the [doc](https://docs.rs/zenoh-runtime/latest/zenoh_runtime/enum.ZRuntime.html). ------------------------------- + ## Zenoh router command line arguments -`zenohd` accepts the following arguments: - * `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only. - * `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case. - * `--cfg :`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and key must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration. - * `-l, --listen ...`: An endpoint on which this router will listen for incoming sessions. - Repeat this option to open several listeners. By default, `tcp/[::]:7447` is used. The following endpoints are currently supported: - - TCP: `tcp/:` - - UDP: `udp/:` - - [TCP+TLS](https://zenoh.io/docs/manual/tls/): `tls/:` - - [QUIC](https://zenoh.io/docs/manual/quic/): `quic/:` - * `-e, --connect ...`: An endpoint this router will try to connect to. Repeat this option to connect to several peers or routers. 
- * `--no-multicast-scouting`: By default zenohd replies to multicast scouting messages for being discovered by peers and clients. - This option disables this feature. - * `-i, --id `: The identifier (as an hexadecimal string - e.g.: A0B23...) that zenohd must use. - **WARNING**: this identifier must be unique in the system! If not set, a random unsigned 128bit integer will be used. - * `--no-timestamp`: By default zenohd adds a HLC-generated Timestamp to each routed Data if there isn't already one. - This option disables this feature. - * `-P, --plugin [ | :]...`: A [plugin](https://zenoh.io/docs/manual/plugins/) that must be loaded. Accepted values: - - a plugin name; zenohd will search for a library named `libzenoh_plugin_.so` on Unix, `libzenoh_plugin_.dylib` on MacOS or `zenoh_plugin_.dll` on Windows. - - `":"`; the plugin will be loaded from library file at ``. - - Repeat this option to load several plugins. - * `--plugin-search-dir ...`: A directory where to search for [plugins](https://zenoh.io/docs/manual/plugins/) libraries to load. - Repeat this option to specify several search directories'. By default, the plugins libraries will be searched in: - `'/usr/local/lib:/usr/lib:~/.zenoh/lib:.'` - * `--rest-http-port `: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values: - - a port number - - a string with format `:` (to bind the HTTP server to a specific interface) - - `"None"` to deactivate the REST plugin - - If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`. +`zenohd` accepts the following arguments: -> [!WARNING] -> The following documentation pertains to the v0.6+ API, which comes many changes to the behaviour and configuration of Zenoh. -To access the v0.5 version of the code and matching README, please go to the [0.5.0-beta.9](https://github.com/eclipse-zenoh/zenoh/tree/0.5.0-beta.9) tagged version. 
+* `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only. +* `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case. +* `--cfg :`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and key must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration. +* `-l, --listen ...`: An endpoint on which this router will listen for incoming sessions. + Repeat this option to open several listeners. By default, `tcp/[::]:7447` is used. The following endpoints are currently supported: + * TCP: `tcp/:` + * UDP: `udp/:` + * [TCP+TLS](https://zenoh.io/docs/manual/tls/): `tls/:` + * [QUIC](https://zenoh.io/docs/manual/quic/): `quic/:` +* `-e, --connect ...`: An endpoint this router will try to connect to. Repeat this option to connect to several peers or routers. +* `--no-multicast-scouting`: By default zenohd replies to multicast scouting messages for being discovered by peers and clients. + This option disables this feature. +* `-i, --id `: The identifier (as an hexadecimal string - e.g.: A0B23...) that zenohd must use. + **WARNING**: this identifier must be unique in the system! If not set, a random unsigned 128bit integer will be used. +* `--no-timestamp`: By default zenohd adds a HLC-generated Timestamp to each routed Data if there isn't already one. + This option disables this feature. 
+* `-P, --plugin [ | :]...`: A [plugin](https://zenoh.io/docs/manual/plugins/) that must be loaded. Accepted values: + * a plugin name; zenohd will search for a library named `libzenoh_plugin_.so` on Unix, `libzenoh_plugin_.dylib` on MacOS or `zenoh_plugin_.dll` on Windows. + * `":"`; the plugin will be loaded from library file at ``. + + Repeat this option to load several plugins. +* `--plugin-search-dir ...`: A directory where to search for [plugins](https://zenoh.io/docs/manual/plugins/) libraries to load. + Repeat this option to specify several search directories'. By default, the plugins libraries will be searched in: + `'/usr/local/lib:/usr/lib:~/.zenoh/lib:.'` +* `--rest-http-port `: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values: + * a port number + * a string with format `:` (to bind the HTTP server to a specific interface) + * `"None"` to deactivate the REST plugin + + If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`. ------------------------------- + ## Plugins > [!WARNING] @@ -200,6 +207,7 @@ This plugin converts GET and PUT REST requests into Zenoh gets and puts respecti This plugin allows you to easily define storages. These will store key-value pairs they subscribed to, and send the most recent ones when queried. Check out [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) for info on how to configure them. ------------------------------- + ## Troubleshooting In case of troubles, please first check on [this page](https://zenoh.io/docs/getting-started/troubleshooting/) if the trouble and cause are already known. 
From e1beef1b5f360be479aeaf17713fc01f83d8383e Mon Sep 17 00:00:00 2001 From: C Schleich Date: Tue, 25 Jun 2024 15:07:47 +0000 Subject: [PATCH 496/598] make HatTokenTrait pub(crate) (#1189) --- zenoh/src/net/routing/hat/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 649e41f4c5..17c71d4162 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -256,7 +256,7 @@ pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box Date: Wed, 26 Jun 2024 10:06:23 +0200 Subject: [PATCH 497/598] Implement Error trait for ZBytes error types (#1192) * Implement Error trait for ZBytes error types * Improve error display message --- zenoh/src/api/bytes.rs | 153 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 60254ff321..b1d1ff079f 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -345,6 +345,28 @@ where Deserialize(>::Error), } +impl std::fmt::Display for ZReadOrDeserializeError +where + T: Debug, + T: TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeError::Read(_) => f.write_str("Read error"), + ZReadOrDeserializeError::Deserialize(e) => f.write_fmt(format_args!("{:?}", e)), + } + } +} + +impl std::error::Error for ZReadOrDeserializeError +where + T: Debug, + T: TryFrom, + >::Error: Debug, +{ +} + impl ZBytesReader<'_> { /// Returns the number of bytes that can still be read pub fn remaining(&self) -> usize { @@ -480,6 +502,14 @@ pub struct ZSerde; #[derive(Debug, Clone, Copy)] pub struct ZDeserializeError; +impl std::fmt::Display for ZDeserializeError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("Deserialize error") + } +} + +impl std::error::Error for ZDeserializeError {} + // ZBytes impl Serialize for ZSerde 
{ type Output = ZBytes; @@ -2255,6 +2285,38 @@ where Two(ZReadOrDeserializeError), } +impl std::fmt::Display for ZReadOrDeserializeErrorTuple2 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeErrorTuple2::One(e) => { + f.write_fmt(format_args!("1st tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple2::Two(e) => { + f.write_fmt(format_args!("2nd tuple element: {}", e)) + } + } + } +} + +impl std::error::Error for ZReadOrDeserializeErrorTuple2 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, +{ +} + impl Deserialize<(A, B)> for ZSerde where A: TryFrom, @@ -2408,6 +2470,47 @@ where Three(ZReadOrDeserializeError), } +impl std::fmt::Display for ZReadOrDeserializeErrorTuple3 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeErrorTuple3::One(e) => { + f.write_fmt(format_args!("1st tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple3::Two(e) => { + f.write_fmt(format_args!("2nd tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple3::Three(e) => { + f.write_fmt(format_args!("3rd tuple element: {}", e)) + } + } + } +} + +impl std::error::Error for ZReadOrDeserializeErrorTuple3 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, +{ +} + impl Deserialize<(A, B, C)> for ZSerde where A: TryFrom, @@ -2585,6 +2688,56 @@ where Four(ZReadOrDeserializeError), } +impl std::fmt::Display for ZReadOrDeserializeErrorTuple4 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, + D: Debug, + D: 
TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeErrorTuple4::One(e) => { + f.write_fmt(format_args!("1st tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple4::Two(e) => { + f.write_fmt(format_args!("2nd tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple4::Three(e) => { + f.write_fmt(format_args!("3rd tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple4::Four(e) => { + f.write_fmt(format_args!("4th tuple element: {}", e)) + } + } + } +} + +impl std::error::Error for ZReadOrDeserializeErrorTuple4 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, + D: Debug, + D: TryFrom, + >::Error: Debug, +{ +} + impl Deserialize<(A, B, C, D)> for ZSerde where A: TryFrom, From 73bdb7f66c773017a8845f83afdb7d9e2877e068 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 26 Jun 2024 15:47:25 +0200 Subject: [PATCH 498/598] Fix and Improve liveliness doc (#1195) --- zenoh/src/api/liveliness.rs | 59 ++++++++++++++++++++++++++++--------- zenoh/src/lib.rs | 52 ++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 14 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 11cfc78918..04b69183a3 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -40,14 +40,12 @@ use super::{ /// A [`LivelinessToken`](LivelinessToken) is a token which liveliness is tied /// to the Zenoh [`Session`](Session) and can be monitored by remote applications. /// -/// A [`LivelinessToken`](LivelinessToken) with key `key/expression` can be -/// queried or subscribed to on key `@/liveliness/key/expression`. -/// /// The `Liveliness` structure can be obtained with the /// [`Session::liveliness()`](Session::liveliness) function /// of the [`Session`] struct. 
/// /// # Examples +/// ### Declaring a token /// ``` /// # #[tokio::main] /// # async fn main() { @@ -61,6 +59,39 @@ use super::{ /// .unwrap(); /// # } /// ``` +/// +/// ### Querying tokens +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session.liveliness().get("key/**").await.unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// if let Ok(sample) = reply.result() { +/// println!(">> Liveliness token {}", sample.key_expr()); +/// } +/// } +/// # } +/// ``` +/// +/// ### Subscribing to liveliness changes +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::{prelude::*, sample::SampleKind}; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let subscriber = session.liveliness().declare_subscriber("key/**").await.unwrap(); +/// while let Ok(sample) = subscriber.recv_async().await { +/// match sample.kind() { +/// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), +/// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), +/// } +/// } +/// # } +/// ``` #[zenoh_macros::unstable] pub struct Liveliness<'a> { pub(crate) session: SessionRef<'a>, @@ -250,9 +281,6 @@ pub(crate) struct LivelinessTokenState { /// A token whose liveliness is tied to the Zenoh [`Session`](Session) /// and can be monitored by remote applications. /// -/// A `LivelinessToken` with key `key/expression` can be queried or subscribed -/// to on key `@/liveliness/key/expression`. -/// /// A declared liveliness token will be seen as alive by any other Zenoh /// application in the system that monitors it while the liveliness token /// is not undeclared or dropped, while the Zenoh application that declared @@ -388,7 +416,7 @@ impl Drop for LivelinessToken<'_> { } } -/// A builder for initializing a [`FlumeSubscriber`](FlumeSubscriber). 
+/// A builder for initializing a liveliness [`FlumeSubscriber`](FlumeSubscriber). /// /// # Examples /// ``` @@ -398,8 +426,8 @@ impl Drop for LivelinessToken<'_> { /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session +/// .liveliness() /// .declare_subscriber("key/expression") -/// .best_effort() /// .await /// .unwrap(); /// # } @@ -415,7 +443,7 @@ pub struct LivelinessSubscriberBuilder<'a, 'b, Handler> { #[zenoh_macros::unstable] impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { - /// Receive the samples for this subscription with a callback. + /// Receive the samples for this liveliness subscription with a callback. /// /// # Examples /// ``` @@ -425,6 +453,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .await @@ -452,10 +481,10 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { } } - /// Receive the samples for this subscription with a mutable callback. + /// Receive the samples for this liveliness subscription with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. 
- /// If your callback is also accepted by the [`callback`](SubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` + /// If your callback is also accepted by the [`callback`](LivelinessSubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` /// /// # Examples /// ``` @@ -466,6 +495,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) /// .await @@ -484,7 +514,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the samples for this liveliness subscription with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -494,6 +524,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) /// .await @@ -642,7 +673,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { } } - /// Receive the replies for this query with a mutable callback. + /// Receive the replies for this liveliness query with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. /// If your callback is also accepted by the [`callback`](LivelinessGetBuilder::callback) method, we suggest you use it instead of `callback_mut` @@ -674,7 +705,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). 
+ /// Receive the replies for this liveliness query with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ``` diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c75e31aa3a..7299453f54 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -326,6 +326,58 @@ pub mod scouting { } /// Liveliness primitives +/// +/// A [`LivelinessToken`](liveliness::LivelinessToken) is a token which liveliness is tied +/// to the Zenoh [`Session`](Session) and can be monitored by remote applications. +/// +/// # Examples +/// ### Declaring a token +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let liveliness = session +/// .liveliness() +/// .declare_token("key/expression") +/// .await +/// .unwrap(); +/// # } +/// ``` +/// +/// ### Querying tokens +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session.liveliness().get("key/**").await.unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// if let Ok(sample) = reply.result() { +/// println!(">> Liveliness token {}", sample.key_expr()); +/// } +/// } +/// # } +/// ``` +/// +/// ### Subscribing to liveliness changes +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::{prelude::*, sample::SampleKind}; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let subscriber = session.liveliness().declare_subscriber("key/**").await.unwrap(); +/// while let Ok(sample) = subscriber.recv_async().await { +/// match sample.kind() { +/// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), +/// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), +/// } +/// } +/// # } +/// ``` #[zenoh_macros::unstable] pub mod liveliness { pub use crate::api::liveliness::{ From 
ebc684cd54d8ba31fe2b938fda673cf507cff320 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 26 Jun 2024 21:10:01 +0200 Subject: [PATCH 499/598] Allow to enable/disable batching from config (#1196) --- DEFAULT_CONFIG.json5 | 2 ++ commons/zenoh-config/src/defaults.rs | 1 + commons/zenoh-config/src/lib.rs | 4 +++- io/zenoh-transport/src/common/pipeline.rs | 23 ++++++++++++++----- io/zenoh-transport/src/manager.rs | 10 ++++++++ io/zenoh-transport/src/multicast/link.rs | 1 + .../src/unicast/universal/link.rs | 1 + 7 files changed, 35 insertions(+), 7 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 0e180a0e07..6906d15cf5 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -284,6 +284,8 @@ /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). /// The default batch size value is the maximum batch size: 65535. batch_size: 65535, + /// Perform batching of messages if they are smaller of the batch_size + batching: true, /// Each zenoh link has a transmission queue that can be configured queue: { /// The size of each priority queue indicates the number of batches a given queue can contain. diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 9d593fabb1..a6be460bcb 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -163,6 +163,7 @@ impl Default for LinkTxConf { batch_size: BatchSize::MAX, queue: QueueConf::default(), threads: num, + batching: true, } } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b7530e91a6..51dce4ffb4 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -384,8 +384,10 @@ validated_struct::validator! 
{ lease: u64, /// Number of keep-alive messages in a link lease duration (default: 4) keep_alive: usize, - /// Zenoh's MTU equivalent (default: 2^16-1) + /// Zenoh's MTU equivalent (default: 2^16-1) (max: 2^16-1) batch_size: BatchSize, + /// Perform batching of messages if they are smaller of the batch_size + batching: bool, pub queue: QueueConf { /// The size of each priority queue indicates the number of batches a given queue can contain. /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index e497199010..68a4b87d24 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -124,6 +124,7 @@ struct StageIn { s_out: StageInOut, mutex: StageInMutex, fragbuf: ZBuf, + batching: bool, } impl StageIn { @@ -179,7 +180,7 @@ impl StageIn { macro_rules! zretok { ($batch:expr, $msg:expr) => {{ - if $msg.is_express() { + if !self.batching || $msg.is_express() { // Move out existing batch self.s_out.move_batch($batch); return true; @@ -315,11 +316,17 @@ impl StageIn { macro_rules! 
zretok { ($batch:expr) => {{ - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; + if !self.batching { + // Move out existing batch + self.s_out.move_batch($batch); + return true; + } else { + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; + } }}; } @@ -494,6 +501,7 @@ pub(crate) struct TransmissionPipelineConf { pub(crate) batch: BatchConfig, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) wait_before_drop: Duration, + pub(crate) batching: bool, pub(crate) backoff: Duration, } @@ -554,6 +562,7 @@ impl TransmissionPipeline { priority: priority[prio].clone(), }, fragbuf: ZBuf::empty(), + batching: config.batching, })); // The stage out for this priority @@ -765,6 +774,7 @@ mod tests { is_compression: true, }, queue_size: [1; Priority::NUM], + batching: true, wait_before_drop: Duration::from_millis(1), backoff: Duration::from_micros(1), }; @@ -777,6 +787,7 @@ mod tests { is_compression: false, }, queue_size: [1; Priority::NUM], + batching: true, wait_before_drop: Duration::from_millis(1), backoff: Duration::from_micros(1), }; diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 0d8c29ea9d..9df02dfc67 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -100,6 +100,7 @@ pub struct TransportManagerConfig { pub whatami: WhatAmI, pub resolution: Resolution, pub batch_size: BatchSize, + pub batching: bool, pub wait_before_drop: Duration, pub queue_size: [usize; Priority::NUM], pub queue_backoff: Duration, @@ -129,6 +130,7 @@ pub struct TransportManagerBuilder { whatami: WhatAmI, resolution: Resolution, batch_size: BatchSize, + batching: bool, wait_before_drop: Duration, queue_size: QueueSizeConf, queue_backoff: Duration, @@ -170,6 +172,11 @@ impl TransportManagerBuilder { self } + pub fn batching(mut self, batching: bool) -> Self { + self.batching = batching; + 
self + } + pub fn wait_before_drop(mut self, wait_before_drop: Duration) -> Self { self.wait_before_drop = wait_before_drop; self @@ -231,6 +238,7 @@ impl TransportManagerBuilder { resolution.set(Field::FrameSN, *link.tx().sequence_number_resolution()); self = self.resolution(resolution); self = self.batch_size(*link.tx().batch_size()); + self = self.batching(*link.tx().batching()); self = self.defrag_buff_size(*link.rx().max_message_size()); self = self.link_rx_buffer_size(*link.rx().buffer_size()); self = self.wait_before_drop(Duration::from_micros( @@ -293,6 +301,7 @@ impl TransportManagerBuilder { whatami: self.whatami, resolution: self.resolution, batch_size: self.batch_size, + batching: self.batching, wait_before_drop: self.wait_before_drop, queue_size, queue_backoff: self.queue_backoff, @@ -339,6 +348,7 @@ impl Default for TransportManagerBuilder { whatami: zenoh_config::defaults::mode, resolution: Resolution::default(), batch_size: BatchSize::MAX, + batching: true, wait_before_drop: Duration::from_micros(wait_before_drop), queue_size: queue.size, queue_backoff: Duration::from_nanos(backoff), diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 794d36d9e7..9c2bdbe1f1 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -323,6 +323,7 @@ impl TransportLinkMulticastUniversal { batch: self.link.config.batch, queue_size: self.transport.manager.config.queue_size, wait_before_drop: self.transport.manager.config.wait_before_drop, + batching: self.transport.manager.config.batching, backoff: self.transport.manager.config.queue_backoff, }; // The pipeline diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 07de4fb744..9655d0964d 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -62,6 +62,7 @@ impl TransportLinkUnicastUniversal { }, 
queue_size: transport.manager.config.queue_size, wait_before_drop: transport.manager.config.wait_before_drop, + batching: transport.manager.config.batching, backoff: transport.manager.config.queue_backoff, }; From 1790d59d1a9aa4995b3997f6fa96e90b24d28a25 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 26 Jun 2024 21:20:21 +0200 Subject: [PATCH 500/598] Fix doc warnings (#1197) * Fix and Improve liveliness doc * Fix doc warnings * Add doc warnings check in CI --- .github/workflows/ci.yml | 5 ++++ .../zenoh-keyexpr/src/key_expr/format/mod.rs | 10 +++---- commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- commons/zenoh-keyexpr/src/lib.rs | 8 ++--- commons/zenoh-protocol/src/core/mod.rs | 2 +- commons/zenoh-protocol/src/core/parameters.rs | 6 ++-- commons/zenoh-protocol/src/network/declare.rs | 8 +++-- commons/zenoh-protocol/src/network/mod.rs | 2 ++ commons/zenoh-protocol/src/network/request.rs | 3 +- commons/zenoh-protocol/src/scouting/hello.rs | 10 +++---- commons/zenoh-protocol/src/scouting/scout.rs | 2 +- .../zenoh-protocol/src/transport/fragment.rs | 2 +- commons/zenoh-protocol/src/transport/frame.rs | 6 ++-- commons/zenoh-protocol/src/transport/mod.rs | 2 ++ commons/zenoh-protocol/src/zenoh/mod.rs | 2 ++ commons/zenoh-util/src/log.rs | 4 +-- io/zenoh-transport/src/common/batch.rs | 30 +++++++++---------- .../unicast/establishment/ext/auth/pubkey.rs | 8 +++++ .../unicast/establishment/ext/auth/usrpwd.rs | 8 +++++ .../src/unicast/establishment/ext/shm.rs | 8 +++++ plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-trait/src/lib.rs | 6 ++-- plugins/zenoh-plugin-trait/src/manager.rs | 2 +- plugins/zenoh-plugin-trait/src/plugin.rs | 4 +-- zenoh/src/api/info.rs | 2 +- zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/query.rs | 4 +-- zenoh/src/api/queryable.rs | 8 ++--- zenoh/src/api/scouting.rs | 8 ++--- zenoh/src/api/selector.rs | 2 +- zenoh/src/api/session.rs | 16 +++++----- zenoh/src/api/subscriber.rs | 6 ++-- zenoh/src/lib.rs | 24 
+++++++-------- zenohd/src/main.rs | 2 +- 34 files changed, 128 insertions(+), 88 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a62257446e..123085319e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -86,6 +86,11 @@ jobs: - name: Run doctests run: cargo test --doc + - name: Build doc + run: cargo doc --no-deps --features unstable + env: + RUSTDOCFLAGS: -Dwarnings + - name: Check licenses run: cargo deny check licenses diff --git a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs index bf5536ec63..d4eccd6d41 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs @@ -17,8 +17,8 @@ //! The same issue arises naturally when designing a KE space, and [`KeFormat`] was designed to help you with this, //! both in constructing and in parsing KEs that fit the formats you've defined. //! -//! [`kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. +//! [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +//! as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.htmll) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. //! //! ## The format syntax //! 
KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. @@ -67,8 +67,8 @@ use support::{IterativeConstructor, Spec}; /// The same issue arises naturally when designing a KE space, and [`KeFormat`] was designed to help you with this, /// both in constructing and in parsing KEs that fit the formats you've defined. /// -/// [`zenoh::kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -/// as the [`zenoh::keformat`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.keformat.html) and [`zenoh::kewrite`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. +/// [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. /// /// ## The format syntax /// KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formmats support "specification" chunks. @@ -120,7 +120,7 @@ impl<'s> KeFormat<'s, Vec>> { /// /// `N` is simply the number of specifications in `value`. If this number of specs isn't known at compile-time, use [`KeFormat::new`] instead. 
/// - /// If you know `value` at compile time, using [`zenoh::kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) instead is advised, + /// If you know `value` at compile time, using [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) instead is advised, /// as it will provide more features and construct higher performance formats than this constructor. pub fn noalloc_new(value: &'s str) -> ZResult; N]>> { value.try_into() diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index 5d7991289e..5bd0f7dae3 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -47,7 +47,7 @@ //! # Iterators //! KeTrees provide iterators for the following operations: //! - Iterating on all nodes ([`IKeyExprTree::tree_iter`]/[`IKeyExprTreeMut::tree_iter_mut`]) -//! - Iterating on key-value pairs in the KeTree ([`IKeyExprTreeExt::key_value_pairs`]) +//! - Iterating on key-value pairs in the KeTree ([`IKeyExprTree::key_value_pairs`]) //! - Iterating on nodes whose KE intersects with a queried KE ([`IKeyExprTree::intersecting_nodes`], [`IKeyExprTreeMut::intersecting_nodes_mut`]) //! - Iterating on nodes whose KE are included by a queried KE ([`IKeyExprTree::included_nodes`], [`IKeyExprTreeMut::included_nodes_mut`]) //! - Iterating on nodes whose KE includes a queried KE ([`IKeyExprTree::nodes_including`], [`IKeyExprTreeMut::nodes_including_mut`]) diff --git a/commons/zenoh-keyexpr/src/lib.rs b/commons/zenoh-keyexpr/src/lib.rs index f80a9c177c..5142076b6d 100644 --- a/commons/zenoh-keyexpr/src/lib.rs +++ b/commons/zenoh-keyexpr/src/lib.rs @@ -22,8 +22,8 @@ //! # Storing Key Expressions //! This module provides 2 flavours to store strings that have been validated to respect the KE syntax, and a third is provided by [`zenoh`](https://docs.rs/zenoh): //! - [`keyexpr`] is the equivalent of a [`str`], -//! 
- [`OwnedKeyExpr`] works like an [`Arc`], -//! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`], but also stores some additional context internal to Zenoh to optimize +//! - [`OwnedKeyExpr`] works like an [`Arc`](std::sync::Arc), +//! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`](std::borrow::Cow), but also stores some additional context internal to Zenoh to optimize //! routing and network usage. //! //! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, @@ -40,8 +40,8 @@ //! The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, //! both in constructing and in parsing KEs that fit the formats you've defined. //! -//! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. +//! [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +//! as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. 
#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index e9bc700318..ebf1bb7f85 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -273,7 +273,7 @@ impl<'de> serde::Deserialize<'de> for ZenohIdProto { } } -/// The unique id of a zenoh entity inside it's parent [`Session`]. +/// The unique id of a zenoh entity inside it's parent `Session`. pub type EntityId = u32; /// The global unique id of a zenoh entity. diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index e44f2f6284..38cb368b5b 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -50,7 +50,7 @@ pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { .map(|p| split_once(p, FIELD_SEPARATOR)) } -/// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. +/// Same as [`from_iter_into`] but keys are sorted in alphabetical order. pub fn sort<'s, I>(iter: I) -> impl Iterator where I: Iterator, @@ -84,7 +84,7 @@ where into } -/// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. +/// Same as [`from_iter`] but it writes into a user-provided string instead of allocating a new one. pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, @@ -131,7 +131,7 @@ pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str (from_iter(iter), item) } -/// Same as [`Self::insert`] but keys are sorted in alphabetical order. +/// Same as [`insert`] but keys are sorted in alphabetical order. 
pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { let (iter, item) = _insert(iter(s), k, v); (from_iter(sort(iter)), item) diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index d8c66559ce..8f31e0ff2a 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -32,6 +32,7 @@ pub mod flag { pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } +/// ```text /// Flags: /// - I: Interest If I==1 then interest_id is present /// - X: Reserved @@ -47,7 +48,7 @@ pub mod flag { /// +---------------+ /// ~ declaration ~ /// +---------------+ -/// +/// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { pub interest_id: Option, @@ -178,6 +179,7 @@ pub mod common { pub mod ext { use super::*; + /// ```text /// Flags: /// - N: Named If N==1 then the key expr has name/suffix /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver @@ -190,7 +192,7 @@ pub mod common { /// +---------------+ /// ~ key_suffix ~ if N==1 -- /// +---------------+ - /// + /// ``` pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { @@ -513,6 +515,7 @@ pub mod queryable { pub const C: u8 = 1; // 0x01 Complete if C==1 then the queryable is complete } /// + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | @@ -521,6 +524,7 @@ pub mod queryable { /// +---------------+ /// ~ distance ~ /// +---------------+ + /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QueryableInfoType { pub complete: bool, // Default false: incomplete diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index b9f3076581..407df6dd52 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ 
-418,6 +418,7 @@ pub mod ext { } } + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |zid_len|X|X|X|X| @@ -426,6 +427,7 @@ pub mod ext { /// +---------------+ /// % eid % /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct EntityGlobalIdType { pub zid: ZenohIdProto, diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index 09e8e6b2b6..ceeec85043 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -82,12 +82,13 @@ pub mod ext { pub type NodeIdType = crate::network::ext::NodeIdType<{ NodeId::ID }>; pub type Target = zextz64!(0x4, true); + /// ```text /// - Target (0x03) /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// % target % /// +---------------+ - /// + /// ``` /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum TargetType { diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 61c7db4ce6..69109ed611 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -17,8 +17,8 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// # Hello message /// -/// The [`Hello`] message is used to advertise the locators a zenoh node is reachable at. -/// The [`Hello`] message SHOULD be sent in a unicast fashion in response to a [`super::Scout`] +/// The `Hello` message is used to advertise the locators a zenoh node is reachable at. 
+/// The `Hello` message SHOULD be sent in a unicast fashion in response to a [`super::Scout`] /// message as shown below: /// /// ```text @@ -34,7 +34,7 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// | | | /// ``` /// -/// Moreover, a [`Hello`] message MAY be sent in the network in a multicast +/// Moreover, a `Hello` message MAY be sent in the network in a multicast /// fashion to advertise the presence of zenoh node. The advertisement operation MAY be performed /// periodically as shown below: /// @@ -54,7 +54,7 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// | | | /// ``` /// -/// Examples of locators included in the [`Hello`] message are: +/// Examples of locators included in the `Hello` message are: /// /// ```text /// udp/192.168.1.1:7447 @@ -63,7 +63,7 @@ use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// tcp/localhost:7447 /// ``` /// -/// The [`Hello`] message structure is defined as follows: +/// The `Hello` message structure is defined as follows: /// /// ```text /// Header flags: diff --git a/commons/zenoh-protocol/src/scouting/scout.rs b/commons/zenoh-protocol/src/scouting/scout.rs index 6d2b49f335..a65c10a4f5 100644 --- a/commons/zenoh-protocol/src/scouting/scout.rs +++ b/commons/zenoh-protocol/src/scouting/scout.rs @@ -18,7 +18,7 @@ use crate::core::{whatami::WhatAmIMatcher, ZenohIdProto}; /// The [`Scout`] message MAY be sent at any point in time to discover the available zenoh nodes in the /// network. The [`Scout`] message SHOULD be sent in a multicast or broadcast fashion. Upon receiving a /// [`Scout`] message, a zenoh node MUST first verify whether the matching criteria are satisfied, then -/// it SHOULD reply with a [`super::Hello`] message in a unicast fashion including all the requested +/// it SHOULD reply with a [`super::HelloProto`] message in a unicast fashion including all the requested /// information. 
/// /// The scouting message flow is the following: diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index d60df23227..eccc7b80c0 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -18,7 +18,7 @@ pub use crate::transport::TransportSn; /// # Fragment message /// -/// The [`Fragment`] message is used to transmit on the wire large [`crate::zenoh::ZenohMessage`] +/// The [`Fragment`] message is used to transmit on the wire large [`crate::network::NetworkMessage`] /// that require fragmentation because they are larger than the maximum batch size /// (i.e. 2^16-1) and/or the link MTU. /// diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 480bebe08e..b3ef1d819f 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -18,11 +18,11 @@ use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; /// # Frame message /// /// The [`Frame`] message is used to transmit one ore more complete serialized -/// [`crate::net::protocol::message::ZenohMessage`]. I.e., the total length of the -/// serialized [`crate::net::protocol::message::ZenohMessage`] (s) MUST be smaller +/// [`crate::network::NetworkMessage`]. I.e., the total length of the +/// serialized [`crate::network::NetworkMessage`] (s) MUST be smaller /// than the maximum batch size (i.e. 2^16-1) and the link MTU. /// The [`Frame`] message is used as means to aggregate multiple -/// [`crate::net::protocol::message::ZenohMessage`] in a single atomic message that +/// [`crate::network::NetworkMessage`] in a single atomic message that /// goes on the wire. By doing so, many small messages can be batched together and /// share common information like the sequence number. 
/// diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index e92860f441..ba2ac32c4a 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -255,11 +255,13 @@ impl fmt::Display for TransportMessage { pub mod ext { use crate::{common::ZExtZ64, core::Priority}; + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// %0| rsv |prio % /// +---------------+ /// - prio: Priority class + /// ``` #[repr(transparent)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QoSType { diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index eeb1a63c1d..320db6884d 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -138,6 +138,7 @@ pub mod ext { use crate::core::{Encoding, EntityGlobalIdProto}; + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |zid_len|X|X|X|X| @@ -148,6 +149,7 @@ pub mod ext { /// +---------------+ /// % sn % /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { pub id: EntityGlobalIdProto, diff --git a/commons/zenoh-util/src/log.rs b/commons/zenoh-util/src/log.rs index 67f1a45df7..023c77121f 100644 --- a/commons/zenoh-util/src/log.rs +++ b/commons/zenoh-util/src/log.rs @@ -27,7 +27,7 @@ use tracing_subscriber::{ /// Calling this function initializes a `lazy_static` in the `tracing` crate /// such static is not deallocated prior to process existing, thus tools such as `valgrind` /// will report a memory leak. 
-/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 +/// Refer to this issue: pub fn try_init_log_from_env() { if let Ok(env_filter) = EnvFilter::try_from_default_env() { init_env_filter(env_filter); @@ -41,7 +41,7 @@ pub fn try_init_log_from_env() { /// Calling this function initializes a `lazy_static` in the `tracing` crate /// such static is not deallocated prior to process existing, thus tools such as `valgrind` /// will report a memory leak. -/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 +/// Refer to this issue: pub fn init_log_from_env_or(fallback: S) where S: AsRef, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 9a58aafd5d..1b065191c0 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -149,7 +149,7 @@ impl BatchHeader { self.0 } - /// Verify that the [`WBatch`][WBatch] is for a stream-based protocol, i.e., the first + /// Verify that the [`WBatch`] is for a stream-based protocol, i.e., the first /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. #[cfg(feature = "transport_compression")] #[inline(always)] @@ -181,22 +181,22 @@ pub enum Finalize { /// Write Batch /// -/// A [`WBatch`][WBatch] is a non-expandable and contiguous region of memory -/// that is used to serialize [`TransportMessage`][TransportMessage] and [`ZenohMessage`][ZenohMessage]. +/// A [`WBatch`] is a non-expandable and contiguous region of memory +/// that is used to serialize [`TransportMessage`] and [`NetworkMessage`]. 
/// -/// [`TransportMessage`][TransportMessage] are always serialized on the batch as they are, while -/// [`ZenohMessage`][ZenohMessage] are always serializaed on the batch as part of a [`TransportMessage`] +/// [`TransportMessage`] are always serialized on the batch as they are, while +/// [`NetworkMessage`] are always serializaed on the batch as part of a [`TransportMessage`] /// [TransportMessage] Frame. Reliable and Best Effort Frames can be interleaved on the same -/// [`WBatch`][WBatch] as long as they fit in the remaining buffer capacity. +/// [`WBatch`] as long as they fit in the remaining buffer capacity. /// -/// In the serialized form, the [`WBatch`][WBatch] always contains one or more -/// [`TransportMessage`][TransportMessage]. In the particular case of [`TransportMessage`][TransportMessage] Frame, -/// its payload is either (i) one or more complete [`ZenohMessage`][ZenohMessage] or (ii) a fragment of a -/// a [`ZenohMessage`][ZenohMessage]. +/// In the serialized form, the [`WBatch`] always contains one or more +/// [`TransportMessage`]. In the particular case of [`TransportMessage`] Frame, +/// its payload is either (i) one or more complete [`NetworkMessage`] or (ii) a fragment of a +/// a [`NetworkMessage`]. /// -/// As an example, the content of the [`WBatch`][WBatch] in memory could be: +/// As an example, the content of the [`WBatch`] in memory could be: /// -/// | Keep Alive | Frame Reliable | Frame Best Effort | +/// | Keep Alive | Frame Reliable\ | Frame Best Effort\ | /// #[derive(Clone, Debug)] pub struct WBatch { @@ -227,20 +227,20 @@ impl WBatch { batch } - /// Verify that the [`WBatch`][WBatch] has no serialized bytes. + /// Verify that the [`WBatch`] has no serialized bytes. #[inline(always)] pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Get the total number of bytes that have been serialized on the [`WBatch`][WBatch]. + /// Get the total number of bytes that have been serialized on the [`WBatch`]. 
#[inline(always)] pub fn len(&self) -> BatchSize { let (_l, _h, p) = Self::split(self.buffer.as_slice(), &self.config); p.len() as BatchSize } - /// Clear the [`WBatch`][WBatch] memory buffer and related internal state. + /// Clear the [`WBatch`] memory buffer and related internal state. #[inline(always)] pub fn clear(&mut self) { self.buffer.clear(); diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 69b4707bf0..5638a9ee33 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -213,12 +213,14 @@ where /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ public key ~ /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct InitSyn { pub(crate) alice_pubkey: ZPublicKey, } @@ -250,6 +252,7 @@ where /*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ public key ~ @@ -258,6 +261,7 @@ where /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct InitAck { pub(crate) bob_pubkey: ZPublicKey, pub(crate) nonce_encrypted_with_alice_pubkey: Vec, @@ -295,12 +299,14 @@ where /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ ciphered nonce~ /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct OpenSyn { pub(crate) nonce_encrypted_with_bob_pubkey: Vec, } @@ -334,11 +340,13 @@ where /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` pub(crate) struct AuthPubKeyFsm<'a> { inner: &'a RwLock, diff --git 
a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 22d7a86817..46d3f349b4 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -217,25 +217,30 @@ impl<'a> AuthUsrPwdFsm<'a> { /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` /*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ nonce ~ /// +---------------+ /// /// ZExtZ64 +/// ``` /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ user ~ @@ -244,6 +249,7 @@ impl<'a> AuthUsrPwdFsm<'a> { /// +---------------+ /// /// ZExtZBuf +/// ``` struct OpenSyn { user: Vec, hmac: Vec, @@ -278,11 +284,13 @@ where /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` #[async_trait] impl<'a> OpenFsm for &'a AuthUsrPwdFsm<'a> { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index bc96d2e34a..1a6f272d42 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -109,10 +109,12 @@ impl AuthUnicast { /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ Segment id ~ /// +---------------+ +/// ``` pub(crate) struct InitSyn { pub(crate) alice_segment: AuthSegmentID, } @@ -145,12 +147,14 @@ where 
/*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ /// ~ Segment id ~ /// +---------------+ +/// ``` struct InitAck { alice_challenge: u64, bob_segment: AuthSegmentID, @@ -188,18 +192,22 @@ where /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ +/// ``` /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ ack ~ /// +---------------+ +/// ``` // Extension Fsm pub(crate) struct ShmFsm<'a> { diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d59d764004..851e9cfbb0 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -225,7 +225,7 @@ pub trait Storage: Send + Sync { /// on the administration space for this storage. fn get_admin_status(&self) -> serde_json::Value; - /// Function called for each incoming data ([`Sample`]) to be stored in this storage. + /// Function called for each incoming data ([`Sample`](zenoh::sample::Sample)) to be stored in this storage. /// A key can be `None` if it matches the `strip_prefix` exactly. /// In order to avoid data loss, the storage must store the `value` and `timestamp` associated with the `None` key /// in a manner suitable for the given backend technology diff --git a/plugins/zenoh-plugin-trait/src/lib.rs b/plugins/zenoh-plugin-trait/src/lib.rs index b9dbb455ab..36c5097795 100644 --- a/plugins/zenoh-plugin-trait/src/lib.rs +++ b/plugins/zenoh-plugin-trait/src/lib.rs @@ -25,13 +25,13 @@ //! //! The actual work of the plugin is performed by the instance, which is created by the [`start`](Plugin::start) function. //! -//! 
Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping plugin is just dropping it's instance. +//! Plugins are loaded, started and stopped by [`PluginsManager`]. Stopping plugin is just dropping it's instance. //! //! Plugins can be static and dynamic. //! -//! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`](crate::manager::PluginsManager) by [`PluginsManager::add_static_plugin`](crate::manager::PluginsManager::add_static_plugin) method. +//! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`] by [`PluginsManager::declare_static_plugin`](crate::manager::PluginsManager::declare_static_plugin) method. //! -//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. +//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`] macro. //! mod compatibility; mod manager; diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs index 5c9c9e8bd2..90651532ec 100644 --- a/plugins/zenoh-plugin-trait/src/manager.rs +++ b/plugins/zenoh-plugin-trait/src/manager.rs @@ -100,7 +100,7 @@ impl DeclaredPlugin { default_lib_prefix: String, loader: Option, diff --git a/plugins/zenoh-plugin-trait/src/plugin.rs b/plugins/zenoh-plugin-trait/src/plugin.rs index 373da64634..b0651d9842 100644 --- a/plugins/zenoh-plugin-trait/src/plugin.rs +++ b/plugins/zenoh-plugin-trait/src/plugin.rs @@ -175,9 +175,9 @@ pub trait Plugin: Sized + 'static { type Instance: PluginInstance; /// Plugins' default name when statically linked. 
const DEFAULT_NAME: &'static str; - /// Plugin's version. Used only for information purposes. It's recommended to use [plugin_version!] macro to generate this string. + /// Plugin's version. Used only for information purposes. It's recommended to use [plugin_version!](crate::plugin_version!) macro to generate this string. const PLUGIN_VERSION: &'static str; - /// Plugin's long version (with git commit hash). Used only for information purposes. It's recommended to use [plugin_long_version!] macro to generate this string. + /// Plugin's long version (with git commit hash). Used only for information purposes. It's recommended to use [plugin_long_version!](crate::plugin_long_version!) macro to generate this string. const PLUGIN_LONG_VERSION: &'static str; /// Starts your plugin. Use `Ok` to return your plugin's control structure fn start(name: &str, args: &Self::StartArgs) -> ZResult; diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 4a53a60851..32bed0eb53 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -159,7 +159,7 @@ impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { } } -/// Struct returned by [`Session::info()`](crate::SessionDeclarations::info) which allows +/// Struct returned by [`Session::info()`](crate::session::SessionDeclarations::info) which allows /// to access information about the current zenoh [`Session`](crate::Session). /// /// # Examples diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 04b69183a3..038a4b8eab 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -705,7 +705,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this liveliness query with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the replies for this liveliness query with a [`Handler`](crate::handlers::IntoHandler).
/// /// # Examples /// ``` diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 408be5514b..e9598a0064 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -308,7 +308,7 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { /// Receive the replies for this query with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. - /// If your callback is also accepted by the [`callback`](GetBuilder::callback) method, we suggest you use it instead of `callback_mut` + /// If your callback is also accepted by the [`callback`](crate::session::SessionGetBuilder::callback) method, we suggest you use it instead of `callback_mut` /// /// # Examples /// ``` @@ -336,7 +336,7 @@ impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the replies for this query with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ``` diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 7c610bf2b4..566a903bd1 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -713,7 +713,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the queries for this Queryable with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -771,10 +771,10 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { } } -/// A queryable that provides data through a [`Handler`](crate::prelude::IntoHandler). +/// A queryable that provides data through a [`Handler`](crate::handlers::IntoHandler). 
/// -/// Queryables can be created from a zenoh [`Session`] -/// with the [`declare_queryable`](crate::Session::declare_queryable) function +/// Queryables can be created from a zenoh [`Session`](crate::Session) +/// with the [`declare_queryable`](crate::session::SessionDeclarations::declare_queryable) function /// and the [`with`](QueryableBuilder::with) function /// of the resulting builder. /// diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 59b3d0dfcb..4f08530533 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -116,7 +116,7 @@ impl ScoutBuilder { self.callback(locked(callback)) } - /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -238,7 +238,7 @@ impl fmt::Debug for ScoutInner { } } -/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoHandler). +/// A scout that returns [`Hello`] messages through a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -348,12 +348,12 @@ fn _scout( /// /// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. /// -/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. +/// Drop the returned [`Scout`] to stop the scouting task. 
/// /// # Arguments /// /// * `what` - The kind of zenoh process to scout for -/// * `config` - The configuration [`Config`] to use for scouting +/// * `config` - The configuration [`crate::Config`] to use for scouting /// /// # Examples /// ```no_run diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index e328761cb5..813ae0528d 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -24,7 +24,7 @@ use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange}; use super::{key_expr::KeyExpr, queryable::Query}; -/// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the +/// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters /// with a few intendend uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 0d50fb9a38..893f4725d5 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -514,8 +514,8 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also allows to create [`Subscriber`](Subscriber) and - /// [`Queryable`](Queryable) with static lifetime that can be moved to several + /// and tasks. It also allows to create [`Subscriber`](crate::subscriber::Subscriber) and + /// [`Queryable`](crate::queryable::Queryable) with static lifetime that can be moved to several /// threads and tasks /// /// Note: the given zenoh `Session` will be closed when the last reference to @@ -547,7 +547,7 @@ impl Session { /// the program's life. Dropping the returned reference will cause a memory /// leak. 
/// - /// This is useful to move entities (like [`Subscriber`](Subscriber)) which + /// This is useful to move entities (like [`Subscriber`](crate::subscriber::Subscriber)) which /// lifetimes are bound to the session lifetime in several threads or tasks. /// /// Note: the given zenoh `Session` cannot be closed any more. At process @@ -862,7 +862,7 @@ impl Session { } /// Query data from the matching queryables in the system. /// - /// Unless explicitly requested via [`GetBuilder::accept_replies`], replies are guaranteed to have + /// Unless explicitly requested via [`accept_replies`](crate::session::SessionGetBuilder::accept_replies), replies are guaranteed to have /// key expressions that match the requested `selector`. /// /// # Arguments @@ -1978,7 +1978,7 @@ impl Session { } impl<'s> SessionDeclarations<'s, 'static> for Arc { - /// Create a [`Subscriber`](Subscriber) for the given key expression. + /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2018,12 +2018,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Queryable`](Queryable) for the given key expression. + /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. 
/// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](Queryable) will reply to + /// [`Queryable`](crate::queryable::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2622,7 +2622,7 @@ impl fmt::Debug for Session { /// [`Queryable`](crate::queryable::Queryable) /// /// This trait is implemented by [`Session`](crate::session::Session) itself and -/// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](crate::session::Arc) +/// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](std::sync::Arc) /// /// # Examples /// ```no_run diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 79b4429876..f3c1fa14e7 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -285,7 +285,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the samples for this subscription with a [`Handler`](crate::handlers::IntoHandler). /// /// # Examples /// ```no_run @@ -410,10 +410,10 @@ where } } -/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). +/// A subscriber that provides data through a [`Handler`](crate::handlers::IntoHandler). /// /// Subscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function +/// with the [`declare_subscriber`](crate::session::SessionDeclarations::declare_subscriber) function /// and the [`with`](SubscriberBuilder::with) function /// of the resulting builder. 
/// diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 7299453f54..1a01ff922d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -148,27 +148,27 @@ pub mod core { /// /// # Storing Key Expressions /// This module provides 3 flavours to store strings that have been validated to respect the KE syntax: -/// - [`keyexpr`] is the equivalent of a [`str`], -/// - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], -/// - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize +/// - [`keyexpr`](crate::key_expr::keyexpr) is the equivalent of a [`str`], +/// - [`OwnedKeyExpr`](crate::key_expr::OwnedKeyExpr) works like an [`std::sync::Arc`], +/// - [`KeyExpr`](crate::key_expr::KeyExpr) works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize /// routing and network usage. /// -/// All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, -/// or even if a [`keyexpr::includes`] another. +/// All of these types [`Deref`](std::ops::Deref) to [`keyexpr`](crate::key_expr::keyexpr), which notably has methods to check whether a given [`intersects`](crate::key_expr::keyexpr::intersects) with another, +/// or even if a [`includes`](crate::key_expr::keyexpr::includes) another. /// /// # Tying values to Key Expressions /// When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect /// the Key Expression semantics with high performance. /// -/// Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. +/// Enter [KeTrees](crate::key_expr::keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs.
/// /// # Building and parsing Key Expressions /// A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. -/// The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, +/// The same issue arises naturally when designing a KE space, and [`KeFormat`](crate::key_expr::format::KeFormat) was designed to help you with this, /// both in constructing and in parsing KEs that fit the formats you've defined. /// -/// [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -/// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. +/// [`kedefine`](crate::key_expr::format::kedefine) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`](crate::key_expr::format::keformat) and [`kewrite`](crate::key_expr::format::kewrite) macros will be able to tell you if you're attempting to set fields of the format that do not exist. pub mod key_expr { #[zenoh_macros::unstable] pub mod keyexpr_tree { @@ -194,7 +194,7 @@ pub mod key_expr { } } -/// Zenoh [`Session`](crate::session::Session) and associated types +/// Zenoh [`Session`] and associated types pub mod session { #[zenoh_macros::internal] pub use crate::api::session::{init, InitBuilder}; @@ -205,7 +205,7 @@ pub mod session { }; } -/// Tools to access information about the current zenoh [`Session`](crate::Session). +/// Tools to access information about the current zenoh [`Session`]. 
pub mod info { pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; pub use zenoh_protocol::core::EntityId; @@ -393,7 +393,7 @@ pub mod time { pub use crate::api::time::new_timestamp; } -/// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants +/// Configuration to pass to [`open`] and [`scout`] functions and associated constants pub mod config { // pub use zenoh_config::{ // client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index bf7f4841a1..81ca715f44 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -56,7 +56,7 @@ struct Args { /// WARNING: this identifier must be unique in the system and must be 16 bytes maximum (32 chars)! #[arg(short, long)] id: Option, - /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_.so' (exact name depending the OS). Or you can give such a string: ": + /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_\.so' (exact name depending the OS). Or you can give such a string: "\:\" /// Repeat this option to load several plugins. If loading failed, zenohd will exit. #[arg(short = 'P', long)] plugin: Vec, From 655922d85124c59281f5a5b4c798156e5268fc91 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 27 Jun 2024 11:21:44 +0200 Subject: [PATCH 501/598] Fix #1185. 
Add encoding option to publisher creation (#1194) * Add encoding option to publisher builder * Cargo fmt * Fix PublisherBuilder Clone --- examples/examples/z_pub.rs | 11 +++++++---- zenoh/src/api/builders/publisher.rs | 13 +++++++++++++ zenoh/src/api/publisher.rs | 9 ++++++++- zenoh/src/api/session.rs | 2 ++ zenoh/tests/qos.rs | 16 ++++++++++++---- 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 2130832fb4..56584f53c4 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::{key_expr::KeyExpr, prelude::*, Config}; +use zenoh::{encoding::Encoding, key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -35,7 +35,12 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {payload}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - publisher.put(buf).attachment(&attachment).await.unwrap(); + publisher + .put(buf) + .encoding(Encoding::TEXT_PLAIN) // Optionally set the encoding metadata + .attachment(&attachment) // Optionally add an attachment + .await + .unwrap(); } } @@ -49,8 +54,6 @@ struct Args { payload: String, #[arg(short, long)] /// The attachments to add to each put. - /// - /// The key-value pairs are &-separated, and = serves as the separator between key and value. attach: Option, #[command(flatten)] common: CommonArgs, diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 923689d0bc..380a9251d5 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -113,6 +113,15 @@ impl PublicationBuilder, T> { } } +impl EncodingBuilderTrait for PublisherBuilder<'_, '_> { + fn encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } + } +} + impl

EncodingBuilderTrait for PublicationBuilder { fn encoding>(self, encoding: T) -> Self { Self { @@ -226,6 +235,7 @@ impl IntoFuture for PublicationBuilder, PublicationBuil pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) session: SessionRef<'a>, pub(crate) key_expr: ZResult>, + pub(crate) encoding: Encoding, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, @@ -240,6 +250,7 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { Ok(k) => Ok(k.clone()), Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), }, + encoding: self.encoding.clone(), congestion_control: self.congestion_control, priority: self.priority, is_express: self.is_express, @@ -289,6 +300,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { session: self.session, id: 0, // This is a one shot Publisher key_expr: self.key_expr?, + encoding: self.encoding, congestion_control: self.congestion_control, priority: self.priority, is_express: self.is_express, @@ -343,6 +355,7 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { session: self.session, id, key_expr, + encoding: self.encoding, congestion_control: self.congestion_control, priority: self.priority, is_express: self.is_express, diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index 6b581ccfad..f4b969b18f 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -139,6 +139,7 @@ pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, pub(crate) id: Id, pub(crate) key_expr: KeyExpr<'a>, + pub(crate) encoding: Encoding, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, @@ -178,8 +179,14 @@ impl<'a> Publisher<'a> { &self.key_expr } + /// Get the [`Encoding`] used when publishing data. #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } + /// Get the `congestion_control` applied when routing the data. 
+ #[inline] pub fn congestion_control(&self) -> CongestionControl { self.congestion_control } @@ -248,7 +255,7 @@ impl<'a> Publisher<'a> { publisher: self, kind: PublicationBuilderPut { payload: payload.into(), - encoding: Encoding::ZENOH_BYTES, + encoding: self.encoding.clone(), }, timestamp: None, #[cfg(feature = "unstable")] diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 893f4725d5..3125e90225 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -409,6 +409,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { PublisherBuilder { session: self.clone(), key_expr: key_expr.try_into().map_err(Into::into), + encoding: Encoding::default(), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, is_express: false, @@ -2092,6 +2093,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { PublisherBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), + encoding: Encoding::default(), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, is_express: false, diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 7ba694d80c..77e7e43a10 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,27 +13,31 @@ // use std::time::Duration; -use zenoh::{core::Priority, prelude::*, publisher::CongestionControl}; +use zenoh::{core::Priority, encoding::Encoding, prelude::*, publisher::CongestionControl}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn pubsub() { +async fn qos_pubsub() { let session1 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let session2 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("test/qos") + .encoding("text/plain") .priority(Priority::DataHigh) - 
.congestion_control(CongestionControl::Drop)) + .congestion_control(CongestionControl::Drop) + .express(true)) .unwrap(); let publisher2 = ztimeout!(session1 .declare_publisher("test/qos") + .encoding(Encoding::ZENOH_STRING) .priority(Priority::DataLow) - .congestion_control(CongestionControl::Block)) + .congestion_control(CongestionControl::Block) + .express(false)) .unwrap(); let subscriber = ztimeout!(session2.declare_subscriber("test/qos")).unwrap(); @@ -42,12 +46,16 @@ async fn pubsub() { ztimeout!(publisher1.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); + assert_eq!(sample.encoding(), &Encoding::TEXT_PLAIN); assert_eq!(sample.priority(), Priority::DataHigh); assert_eq!(sample.congestion_control(), CongestionControl::Drop); + assert!(sample.express()); ztimeout!(publisher2.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); + assert_eq!(sample.encoding(), &Encoding::ZENOH_STRING); assert_eq!(sample.priority(), Priority::DataLow); assert_eq!(sample.congestion_control(), CongestionControl::Block); + assert!(!sample.express()); } From 271b7c7910262f2ab424f9b8cd00be980900f6b0 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 27 Jun 2024 16:13:31 +0200 Subject: [PATCH 502/598] fix: replace some unsafe code with safe version (#1184) * fix: replace some unsafe code with safe version Compiler is able to optimize bound checks based on previous checks See https://godbolt.org/z/oGesnb6a4 or https://godbolt.org/z/c6c41bvE5 `Writer::with_slot` has been made unsafe, because its implementations rely on a precondition on the write callback * fix: fix documentation * fix: fix missing annotation * fix: fix test * fix: address PR review --- commons/zenoh-buffers/src/bbuf.rs | 5 +- commons/zenoh-buffers/src/lib.rs | 11 ++- commons/zenoh-buffers/src/slice.rs | 91 +++++++----------------- commons/zenoh-buffers/src/vec.rs | 4 +- commons/zenoh-buffers/src/zbuf.rs | 11 ++- commons/zenoh-buffers/src/zslice.rs | 2 +- 
commons/zenoh-buffers/tests/readwrite.rs | 8 ++- commons/zenoh-codec/src/core/zint.rs | 7 +- io/zenoh-transport/src/common/batch.rs | 12 ++-- 9 files changed, 62 insertions(+), 89 deletions(-) diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index 72491ae704..7af2a1a464 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -127,7 +127,7 @@ impl Writer for &mut BBuf { self.capacity() - self.len() } - fn with_slot(&mut self, len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -135,7 +135,8 @@ impl Writer for &mut BBuf { return Err(DidntWrite); } - let written = f(self.as_writable_slice()); + // SAFETY: self.remaining() >= len + let written = write(unsafe { self.as_writable_slice().get_unchecked_mut(..len) }); self.len += written; NonZeroUsize::new(written).ok_or(DidntWrite) diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index a527dfbc19..ee630b4201 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -137,9 +137,14 @@ pub mod writer { fn can_write(&self) -> bool { self.remaining() != 0 } - /// Provides a buffer of exactly `len` uninitialized bytes to `f` to allow in-place writing. - /// `f` must return the number of bytes it actually wrote. - fn with_slot(&mut self, len: usize, f: F) -> Result + /// Provides a buffer of exactly `len` uninitialized bytes to `write` to allow in-place writing. + /// `write` must return the number of bytes it actually wrote. 
+ /// + /// # Safety + /// + /// Caller must ensure that `write` return an integer lesser than or equal to the length of + /// the slice passed in argument + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize; } diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index 658827b6c4..1f3771c2eb 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -61,26 +61,13 @@ impl HasWriter for &mut [u8] { impl Writer for &mut [u8] { fn write(&mut self, bytes: &[u8]) -> Result { - let len = bytes.len().min(self.len()); - if len == 0 { + let Some(len) = NonZeroUsize::new(bytes.len().min(self.len())) else { return Err(DidntWrite); - } - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - // We early return if length is 0. - let lhs = crate::unsafe_slice_mut!(self, ..len); - let rhs = crate::unsafe_slice!(bytes, ..len); - lhs.copy_from_slice(rhs); - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - let lhs = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; - - // SAFETY: this operation is safe since we check if len is non-zero. - Ok(unsafe { NonZeroUsize::new_unchecked(len) }) + }; + let (to_write, remain) = mem::take(self).split_at_mut(len.get()); + to_write.copy_from_slice(&bytes[..len.get()]); + *self = remain; + Ok(len) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { @@ -88,19 +75,7 @@ impl Writer for &mut [u8] { if self.len() < len { return Err(DidntWrite); } - - // SAFETY: len is guaranteed to be the smaller than lhs length. 
- let lhs = crate::unsafe_slice_mut!(self, ..len); - let rhs = crate::unsafe_slice!(bytes, ..len); - lhs.copy_from_slice(rhs); - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - let lhs = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; - + let _ = self.write(bytes); Ok(()) } @@ -108,24 +83,17 @@ impl Writer for &mut [u8] { self.len() } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { if len > self.len() { return Err(DidntWrite); } - // SAFETY: we early return in case len is greater than slice.len(). - let s = crate::unsafe_slice_mut!(self, ..len); - len = f(s); - // SAFETY: we early return in case len is greater than slice.len(). - let s = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(s) }; - - NonZeroUsize::new(len).ok_or(DidntWrite) + let written = write(&mut self[..len]); + // SAFETY: `written` < `len` is guaranteed by function contract + *self = unsafe { mem::take(self).get_unchecked_mut(written..) }; + NonZeroUsize::new(written).ok_or(DidntWrite) } } @@ -165,14 +133,13 @@ impl<'a> HasReader for &'a [u8] { impl Reader for &[u8] { fn read(&mut self, into: &mut [u8]) -> Result { - let len = self.len().min(into.len()); - // SAFETY: len is guaranteed to be the smaller than lhs length. 
- let lhs = crate::unsafe_slice_mut!(into, ..len); - let rhs = crate::unsafe_slice!(self, ..len); - lhs.copy_from_slice(rhs); - // SAFETY: len is guaranteed to be smaller than slice.len(). - *self = crate::unsafe_slice!(self, len..); - NonZeroUsize::new(len).ok_or(DidntRead) + let Some(len) = NonZeroUsize::new(self.len().min(into.len())) else { + return Err(DidntRead); + }; + let (to_write, remain) = self.split_at(len.get()); + into[..len.get()].copy_from_slice(to_write); + *self = remain; + Ok(len) } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { @@ -180,24 +147,16 @@ impl Reader for &[u8] { if self.len() < len { return Err(DidntRead); } - // SAFETY: len is guaranteed to be the smaller than lhs length. - let lhs = crate::unsafe_slice_mut!(into, ..len); - let rhs = crate::unsafe_slice!(self, ..len); - lhs.copy_from_slice(rhs); - // SAFETY: len is guaranteed to be smaller than slice.len(). - *self = crate::unsafe_slice!(self, len..); + let (to_write, remain) = self.split_at(len); + into[..len].copy_from_slice(to_write); + *self = remain; Ok(()) } fn read_u8(&mut self) -> Result { - if !self.can_read() { - return Err(DidntRead); - } - // SAFETY: we early return in case the slice is empty. - // Therefore, there is at least one element in the slice. 
- let ret = *crate::unsafe_slice!(self, 0); - *self = crate::unsafe_slice!(self, 1..); - Ok(ret) + let mut buf = [0; 1]; + self.read(&mut buf)?; + Ok(buf[0]) } fn read_zslices(&mut self, len: usize, mut f: F) -> Result<(), DidntRead> { diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index 9d63880aea..fc81fa6687 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -93,7 +93,7 @@ impl Writer for &mut Vec { usize::MAX } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, mut len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -103,7 +103,7 @@ impl Writer for &mut Vec { let s = crate::unsafe_slice_mut!(self.spare_capacity_mut(), ..len); // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { f(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; + len = unsafe { write(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; // SAFETY: we already reserved len elements on the vector. 
unsafe { self.set_len(self.len() + len) }; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index a198c654d2..f846280b91 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -590,12 +590,11 @@ impl<'a> HasWriter for &'a mut ZBuf { impl Writer for ZBufWriter<'_> { fn write(&mut self, bytes: &[u8]) -> Result { - if bytes.is_empty() { + let Some(len) = NonZeroUsize::new(bytes.len()) else { return Err(DidntWrite); - } + }; self.write_exact(bytes)?; - // SAFETY: this operation is safe since we check if bytes is empty - Ok(unsafe { NonZeroUsize::new_unchecked(bytes.len()) }) + Ok(len) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { @@ -646,7 +645,7 @@ impl Writer for ZBufWriter<'_> { Ok(()) } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, mut len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -658,7 +657,7 @@ impl Writer for ZBufWriter<'_> { let s = crate::unsafe_slice_mut!(cache.spare_capacity_mut(), ..len); // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { f(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; + len = unsafe { write(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; // SAFETY: we already reserved len elements on the vector. 
unsafe { cache.set_len(prev_cache_len + len) }; diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 42babb8b88..6ed404eb78 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -122,7 +122,7 @@ impl ZSlice { } pub fn empty() -> Self { - unsafe { ZSlice::new_unchecked(Arc::new([]), 0, 0) } + Self::new(Arc::new([]), 0, 0).unwrap() } /// # Safety diff --git a/commons/zenoh-buffers/tests/readwrite.rs b/commons/zenoh-buffers/tests/readwrite.rs index cdfc8fea05..dd5481c958 100644 --- a/commons/zenoh-buffers/tests/readwrite.rs +++ b/commons/zenoh-buffers/tests/readwrite.rs @@ -46,13 +46,15 @@ macro_rules! run_write { writer.write_exact(&WBS4).unwrap(); - writer - .with_slot(4, |mut buffer| { + // SAFETY: callback returns the length of the buffer + unsafe { + writer.with_slot(4, |mut buffer| { let w = buffer.write(&WBS5).unwrap(); assert_eq!(4, w.get()); w.get() }) - .unwrap(); + } + .unwrap(); }; } diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 20c0a0a4f6..a42395b781 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -112,7 +112,7 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, mut x: u64) -> Self::Output { - writer.with_slot(VLE_LEN_MAX, move |buffer| { + let write = move |buffer: &mut [u8]| { let mut len = 0; while (x & !0x7f_u64) != 0 { // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is @@ -139,7 +139,10 @@ where } // The number of written bytes len - })?; + }; + // SAFETY: write algorithm guarantees than returned length is lesser than or equal to + // `VLE_LEN_MAX`. + unsafe { writer.with_slot(VLE_LEN_MAX, write)? 
}; Ok(()) } } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 1b065191c0..c36993ddf7 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -322,11 +322,15 @@ impl WBatch { // Compress the actual content let (_length, _header, payload) = Self::split(self.buffer.as_slice(), &self.config); let mut writer = support.writer(); - writer - .with_slot(writer.remaining(), |b| { - lz4_flex::block::compress_into(payload, b).unwrap_or(0) + // SAFETY: assertion ensures `with_slot` precondition + unsafe { + writer.with_slot(writer.remaining(), |b| { + let len = lz4_flex::block::compress_into(payload, b).unwrap_or(0); + assert!(len <= b.len()); + len }) - .map_err(|_| zerror!("Compression error"))?; + } + .map_err(|_| zerror!("Compression error"))?; // Verify whether the resulting compressed data is smaller than the initial input if support.len() < self.buffer.len() { From c96f6b010e8c647ed715dec0c11c0a1afbd739e7 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 27 Jun 2024 17:06:23 +0200 Subject: [PATCH 503/598] Fix Interest codec test (#1198) --- commons/zenoh-codec/tests/codec.rs | 5 +++++ commons/zenoh-protocol/src/network/interest.rs | 8 ++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 1e1bbe18a3..46fabe5c51 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -532,6 +532,11 @@ fn codec_declare_body() { run!(DeclareBody, DeclareBody::rand()); } +#[test] +fn codec_interest() { + run!(Interest, Interest::rand()); +} + #[test] fn codec_declare_keyexpr() { run!(DeclareKeyExpr, DeclareKeyExpr::rand()); diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index 29ed7e4c29..9f329b6ff5 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ 
b/commons/zenoh-protocol/src/network/interest.rs @@ -195,8 +195,12 @@ impl Interest { let id = rng.gen::(); let mode = InterestMode::rand(); - let options = InterestOptions::rand(); - let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); + let options = if mode == InterestMode::Final { + InterestOptions::empty() + } else { + InterestOptions::rand() + }; + let wire_expr = options.restricted().then_some(WireExpr::rand()); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); From 0c7faa950adce5688ab6e84d6796dcb61516ed58 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 28 Jun 2024 09:35:33 +0200 Subject: [PATCH 504/598] Fix CLI args for z_pub_shm (#1199) --- examples/examples/z_pub_shm.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index fd3c7ce1b6..457027ba75 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -95,7 +95,7 @@ async fn main() -> Result<(), ZError> { struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] /// The key expression to publish onto. - path: KeyExpr<'static>, + key: KeyExpr<'static>, #[arg(short, long, default_value = "Pub from SHM Rust!")] /// The payload of to publish. 
payload: String, @@ -105,5 +105,5 @@ struct Args { fn parse_args() -> (Config, KeyExpr<'static>, String) { let args = Args::parse(); - (args.common.into(), args.path, args.payload) + (args.common.into(), args.key, args.payload) } From 90054a615e2acddfbcfa1fd283aa8c866aa85682 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 1 Jul 2024 17:07:50 +0200 Subject: [PATCH 505/598] Fix bug leading to call get_unchecked on empty array UB (#1207) --- commons/zenoh-keyexpr/src/key_expr/include.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/include.rs b/commons/zenoh-keyexpr/src/key_expr/include.rs index ca9efaee2d..15e4f50f40 100644 --- a/commons/zenoh-keyexpr/src/key_expr/include.rs +++ b/commons/zenoh-keyexpr/src/key_expr/include.rs @@ -41,7 +41,7 @@ impl Includer<&[u8], &[u8]> for LTRIncluder { if (lempty && !right.has_verbatim()) || (!lempty && self.includes(lrest, right)) { return true; } - if unsafe { right.has_direct_verbatim_non_empty() } { + if right.has_direct_verbatim() { return false; } right = Split::split_once(right, &DELIMITER).1; From 9fcb61e9c4e101e9070cf74868b1bcb3876c2cec Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Wed, 3 Jul 2024 10:10:55 +0200 Subject: [PATCH 506/598] feat: unify pub/sub and query/reply modules (#1193) * feat: unify pub/sub and query/reply modules * fix: fix shm error * feat: move encoding and remove core * feat: rename `Canonizable` into `Canonize` and reexport it * fix: fix examples * fix: fix doc links --- .../src/queryable_get/bin/z_queryable_get.rs | 5 +- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 4 +- commons/zenoh-keyexpr/src/key_expr/canon.rs | 6 +- commons/zenoh-keyexpr/src/key_expr/owned.rs | 4 +- commons/zenoh-macros/src/lib.rs | 2 +- examples/examples/z_get.rs | 5 +- examples/examples/z_get_shm.rs | 3 +- examples/examples/z_info.rs | 2 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_pong.rs | 2 +- 
examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_pub_thr.rs | 6 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 4 +- plugins/zenoh-plugin-rest/src/lib.rs | 10 +- .../src/backends_mgt.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/memory_backend/mod.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/snapshotter.rs | 2 +- .../src/replica/storage.rs | 5 +- .../src/storages_mgt.rs | 2 +- zenoh-ext/src/group.rs | 4 +- zenoh-ext/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 8 +- zenoh-ext/src/querying_subscriber.rs | 7 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 6 +- zenoh/src/api/builders/publisher.rs | 6 +- zenoh/src/api/bytes.rs | 1 + zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/key_expr.rs | 4 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/session.rs | 30 ++-- zenoh/src/lib.rs | 146 +++++++----------- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 12 +- zenoh/tests/qos.rs | 6 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 4 +- zenoh/tests/shm.rs | 4 +- zenoh/tests/unicity.rs | 2 +- zenohd/src/main.rs | 2 +- 47 files changed, 160 insertions(+), 182 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 70945a4926..8ea7be201b 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -14,7 +14,10 @@ use std::{convert::TryFrom, time::Duration}; use zenoh::{ - config::Config, key_expr::KeyExpr, prelude::*, query::QueryTarget, selector::Selector, + config::Config, + key_expr::KeyExpr, + prelude::*, + query::{QueryTarget, Selector}, }; #[tokio::main] diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs 
b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 6d1774bcd8..a98337b987 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -27,7 +27,7 @@ use core::{ use zenoh_result::{bail, Error as ZError, ZResult}; -use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; +use super::{canon::Canonize, OwnedKeyExpr, FORBIDDEN_CHARS}; /// A [`str`] newtype that is statically known to be a valid key expression. /// @@ -72,7 +72,7 @@ impl keyexpr { pub fn autocanonize<'a, T, E>(t: &'a mut T) -> Result<&'a Self, E> where &'a Self: TryFrom<&'a T, Error = E>, - T: Canonizable + ?Sized, + T: Canonize + ?Sized, { t.canonize(); Self::new(t) diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index cccccdfba3..7080dbde1a 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -19,13 +19,13 @@ use crate::key_expr::{ DELIMITER, DOUBLE_WILD, SINGLE_WILD, }; -pub trait Canonizable { +pub trait Canonize { fn canonize(&mut self); } const DOLLAR_STAR: &[u8; 2] = b"$*"; -impl Canonizable for &mut str { +impl Canonize for &mut str { fn canonize(&mut self) { let mut writer = Writer { ptr: self.as_mut_ptr(), @@ -114,7 +114,7 @@ impl Canonizable for &mut str { } } -impl Canonizable for String { +impl Canonize for String { fn canonize(&mut self) { let mut s = self.as_mut(); s.canonize(); diff --git a/commons/zenoh-keyexpr/src/key_expr/owned.rs b/commons/zenoh-keyexpr/src/key_expr/owned.rs index a53fdec2f0..6089df2a1e 100644 --- a/commons/zenoh-keyexpr/src/key_expr/owned.rs +++ b/commons/zenoh-keyexpr/src/key_expr/owned.rs @@ -22,7 +22,7 @@ use core::{ str::FromStr, }; -use super::{canon::Canonizable, keyexpr}; +use super::{canon::Canonize, keyexpr}; /// A [`Arc`] newtype that is statically known to be a valid key expression. 
/// @@ -60,7 +60,7 @@ impl OwnedKeyExpr { pub fn autocanonize(mut t: T) -> Result where Self: TryFrom, - T: Canonizable, + T: Canonize, { t.canonize(); Self::new(t) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index c1c58d725e..003525daa9 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -287,7 +287,7 @@ fn keformat_support(source: &str) -> proc_macro2::TokenStream { let formatter_doc = format!("And instance of a formatter for `{source}`."); quote! { - use ::zenoh::core::Result as ZResult; + use ::zenoh::Result as ZResult; const FORMAT_INNER: ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> = unsafe { ::zenoh::key_expr::format::macro_support::const_new(#source, [#(#segments)*]) }; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 2069e20b31..eebe582f98 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -14,7 +14,10 @@ use std::time::Duration; use clap::Parser; -use zenoh::{query::QueryTarget, selector::Selector, Config}; +use zenoh::{ + query::{QueryTarget, Selector}, + Config, +}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 71a3e3aa65..d8ea97da33 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -15,8 +15,7 @@ use std::time::Duration; use clap::Parser; use zenoh::{ - query::QueryTarget, - selector::Selector, + query::{QueryTarget, Selector}, shm::{ zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index d2e4bfdbc0..aa40ef62d4 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{info::ZenohId, prelude::*}; +use zenoh::{prelude::*, session::ZenohId}; use 
zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 96454da614..eec9324173 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,7 +14,7 @@ use std::time::{Duration, Instant}; use clap::Parser; -use zenoh::{bytes::ZBytes, key_expr::keyexpr, prelude::*, publisher::CongestionControl, Config}; +use zenoh::{bytes::ZBytes, key_expr::keyexpr, prelude::*, qos::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 5e809c9341..4c6f8fed01 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -18,7 +18,7 @@ use zenoh::{ bytes::ZBytes, key_expr::keyexpr, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, Config, }; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 6a1b8580c7..ef022d234c 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{key_expr::keyexpr, prelude::*, publisher::CongestionControl, Config}; +use zenoh::{key_expr::keyexpr, prelude::*, qos::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 56584f53c4..9f84ba118f 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,7 +14,7 @@ use std::time::Duration; use clap::Parser; -use zenoh::{encoding::Encoding, key_expr::KeyExpr, prelude::*, Config}; +use zenoh::{bytes::Encoding, key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 3093a0962d..4641c51c95 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ 
b/examples/examples/z_pub_shm_thr.rs @@ -15,7 +15,7 @@ use clap::Parser; use zenoh::{ bytes::ZBytes, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, Config, }; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 359e375203..e6c063318e 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -15,7 +15,11 @@ use std::convert::TryInto; use clap::Parser; -use zenoh::{bytes::ZBytes, core::Priority, prelude::*, publisher::CongestionControl}; +use zenoh::{ + bytes::ZBytes, + prelude::*, + qos::{CongestionControl, Priority}, +}; use zenoh_examples::CommonArgs; fn main() { diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 0d710d9942..98167680c8 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -18,8 +18,8 @@ use derive_more::{AsMut, AsRef}; use schemars::JsonSchema; use serde_json::{Map, Value}; use zenoh::{ - core::Result as ZResult, key_expr::{keyexpr, OwnedKeyExpr}, + Result as ZResult, }; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 851e9cfbb0..a75d934050 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -124,10 +124,10 @@ use async_trait::async_trait; use const_format::concatcp; use zenoh::{ - core::Result as ZResult, internal::Value, key_expr::{keyexpr, OwnedKeyExpr}, time::Timestamp, + Result as ZResult, }; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index e39d7c28b2..5f7d466f13 100644 --- 
a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,9 @@ use std::time::Duration; use clap::{arg, Command}; use zenoh::{ - config::Config, key_expr::keyexpr, publisher::CongestionControl, sample::QoSBuilderTrait, + config::Config, + key_expr::keyexpr, + qos::{CongestionControl, QoSBuilderTrait}, session::SessionDeclarations, }; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index a35025e26e..e3dcc0130e 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -26,8 +26,7 @@ use http_types::Method; use serde::{Deserialize, Serialize}; use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; use zenoh::{ - bytes::ZBytes, - encoding::Encoding, + bytes::{Encoding, ZBytes}, internal::{ bail, plugins::{RunningPluginTrait, ZenohPlugin}, @@ -36,16 +35,15 @@ use zenoh::{ }, key_expr::{keyexpr, KeyExpr}, prelude::*, - query::{QueryConsolidation, Reply}, - sample::{EncodingBuilderTrait, Sample, SampleKind}, - selector::{Parameters, Selector, ZenohParameters}, + query::{Parameters, QueryConsolidation, Reply, Selector, ZenohParameters}, + sample::{Sample, SampleKind}, session::{Session, SessionDeclarations}, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; mod config; pub use config::Config; -use zenoh::query::ReplyError; +use zenoh::{bytes::EncodingBuilderTrait, query::ReplyError}; const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v"); lazy_static::lazy_static! 
{ diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 1bb8af4330..b789b563d2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use flume::Sender; -use zenoh::{core::Result as ZResult, session::Session}; +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::{config::StorageConfig, Capability, VolumeInstance}; use super::storages_mgt::*; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 6ea19ce25c..c916b649d9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,7 +30,6 @@ use flume::Sender; use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; use zenoh::{ - core::Result as ZResult, internal::{ plugins::{Response, RunningPlugin, RunningPluginTrait, ZenohPlugin}, runtime::Runtime, @@ -39,6 +38,7 @@ use zenoh::{ key_expr::{keyexpr, KeyExpr}, prelude::Wait, session::Session, + Result as ZResult, }; use zenoh_backend_traits::{ config::{ConfigDiff, PluginConfig, StorageConfig, VolumeConfig}, diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index e3a9cd9196..7c74d9f7f9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,7 +15,7 @@ use std::{collections::HashMap, sync::Arc}; use async_std::sync::RwLock; use async_trait::async_trait; -use zenoh::{core::Result as ZResult, internal::Value, key_expr::OwnedKeyExpr, time::Timestamp}; +use zenoh::{internal::Value, key_expr::OwnedKeyExpr, time::Timestamp, Result as ZResult}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, *, diff --git 
a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 802e420636..c11a632e41 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,7 +21,7 @@ use std::{ use async_std::sync::Arc; use zenoh::{ - internal::Value, key_expr::OwnedKeyExpr, prelude::*, sample::Sample, selector::Parameters, + internal::Value, key_expr::OwnedKeyExpr, prelude::*, query::Parameters, sample::Sample, time::Timestamp, Session, }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 8ffeddd71f..7992053a67 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -24,8 +24,8 @@ use zenoh::{ internal::Value, key_expr::{KeyExpr, OwnedKeyExpr}, prelude::*, + query::Selector, sample::{Sample, SampleBuilder}, - selector::Selector, time::Timestamp, Session, }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index c5b2573335..6bb2cf113b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -24,7 +24,7 @@ use async_std::{ }; use flume::Receiver; use futures::join; -use zenoh::{info::ZenohId, key_expr::OwnedKeyExpr, time::Timestamp}; +use zenoh::{key_expr::OwnedKeyExpr, session::ZenohId, time::Timestamp}; use zenoh_backend_traits::config::ReplicaConfig; use super::{Digest, DigestConfig, LogEntry}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9d12dbd599..f926417743 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use zenoh::{ - core::Result as ZResult, + bytes::EncodingBuilderTrait, internal::{ bail, buffers::{SplitBuffer, ZBuf}, @@ -35,9 +35,10 @@ use zenoh::{ KeyExpr, OwnedKeyExpr, }, query::{ConsolidationMode, QueryTarget}, - sample::{EncodingBuilderTrait, Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, + sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, session::{Session, SessionDeclarations}, time::{new_timestamp, Timestamp, NTP64}, + Result as ZResult, }; use zenoh_backend_traits::{ config::{GarbageCollectionConfig, StorageConfig}, diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 1670310fcf..27dbaf58f6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; -use zenoh::{core::Result as ZResult, session::Session}; +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::StorageConfig; pub use super::replica::{Replica, StorageService}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 9120a323ae..4078db08dc 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -28,11 +28,11 @@ use serde::{Deserialize, Serialize}; use tokio::sync::Mutex; use zenoh::{ bytes::ZBytesReader, - core::Priority, internal::{bail, Condition, TaskController}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::*, - publisher::Publisher, + pubsub::Publisher, + qos::Priority, Session, }; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 9802d04e3a..659afa006d 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -22,7 +22,7 @@ pub use querying_subscriber::{ }; pub use session_ext::SessionExt; pub 
use subscriber_ext::{SubscriberBuilderExt, SubscriberForward}; -use zenoh::{core::Result as ZResult, internal::zerror, query::Reply, sample::Sample}; +use zenoh::{internal::zerror, query::Reply, sample::Sample, Result as ZResult}; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index b50f5affb4..09a21f2e16 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -19,16 +19,14 @@ use std::{ }; use zenoh::{ - core::{Error, Resolvable, Resolve, Result as ZResult}, internal::{bail, runtime::ZRuntime, ResolveFuture, TerminatableTask}, key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::Wait, - query::Query, - queryable::Queryable, + pubsub::FlumeSubscriber, + query::{Query, Queryable, ZenohParameters}, sample::{Locality, Sample}, - selector::ZenohParameters, session::{SessionDeclarations, SessionRef}, - subscriber::FlumeSubscriber, + Error, Resolvable, Resolve, Result as ZResult, }; /// The builder of PublicationCache, allowing to configure it. 
diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e26de62ae0..baf486601d 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -21,17 +21,16 @@ use std::{ }; use zenoh::{ - core::{Error, Resolvable, Resolve, Result as ZResult}, handlers::{locked, DefaultHandler, IntoHandler}, internal::zlock, key_expr::KeyExpr, prelude::Wait, - query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + pubsub::{Reliability, Subscriber}, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr, Selector}, sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, - selector::Selector, session::{SessionDeclarations, SessionRef}, - subscriber::{Reliability, Subscriber}, time::{new_timestamp, Timestamp}, + Error, Resolvable, Resolve, Result as ZResult, }; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 2b9cda7cb0..606f00743b 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -14,9 +14,9 @@ use std::{convert::TryInto, sync::Arc}; use zenoh::{ - core::Error, key_expr::KeyExpr, session::{Session, SessionRef}, + Error, }; use super::PublicationCacheBuilder; diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index bac334035d..a7356f86dc 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -16,11 +16,11 @@ use std::time::Duration; use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; use zenoh::{ - core::Result as ZResult, liveliness::LivelinessSubscriberBuilder, + pubsub::{Reliability, Subscriber, SubscriberBuilder}, query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, sample::{Locality, Sample}, - subscriber::{Reliability, Subscriber, SubscriberBuilder}, + Result as ZResult, }; use crate::{ @@ -42,7 +42,7 @@ where } } -/// Some extensions to the [`zenoh::subscriber::SubscriberBuilder`](zenoh::subscriber::SubscriberBuilder) +/// Some 
extensions to the [`zenoh::subscriber::SubscriberBuilder`](zenoh::pubsub::SubscriberBuilder) pub trait SubscriberBuilderExt<'a, 'b, Handler> { type KeySpace; diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs index 380a9251d5..666b4378e0 100644 --- a/zenoh/src/api/builders/publisher.rs +++ b/zenoh/src/api/builders/publisher.rs @@ -50,13 +50,13 @@ pub struct PublicationBuilderPut { pub struct PublicationBuilderDelete; /// A builder for initializing [`Session::put`](crate::session::Session::put), [`Session::delete`](crate::session::Session::delete), -/// [`Publisher::put`](crate::publisher::Publisher::put), and [`Publisher::delete`](crate::publisher::Publisher::delete) operations. +/// [`Publisher::put`](crate::pubsub::Publisher::put), and [`Publisher::delete`](crate::pubsub::Publisher::delete) operations. /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::{encoding::Encoding, prelude::*, publisher::CongestionControl}; +/// use zenoh::{bytes::Encoding, prelude::*, qos::CongestionControl}; /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session @@ -220,7 +220,7 @@ impl IntoFuture for PublicationBuilder, PublicationBuil /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::{prelude::*, publisher::CongestionControl}; +/// use zenoh::{prelude::*, qos::CongestionControl}; /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let publisher = session diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index b1d1ff079f..572ac16cab 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -2959,6 +2959,7 @@ impl From> for ZBytes { } mod tests { + #[test] fn serializer() { use std::borrow::Cow; diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 2b6cee2b23..7e86e9091a 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -37,7 +37,7 @@ use super::bytes::ZBytes; /// /// Create an 
[`Encoding`] from a string and viceversa. /// ``` -/// use zenoh::encoding::Encoding; +/// use zenoh::bytes::Encoding; /// /// let encoding: Encoding = "text/plain".into(); /// let text: String = encoding.clone().into(); @@ -49,7 +49,7 @@ use super::bytes::ZBytes; /// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use /// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain its string representation. /// ``` -/// use zenoh::encoding::Encoding; +/// use zenoh::bytes::Encoding; /// use std::borrow::Cow; /// /// // This allocates @@ -64,7 +64,7 @@ use super::bytes::ZBytes; /// The conventions is to use the `;` separator if an encoding is created from a string. /// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a scheme to one of the associated constants. /// ``` -/// use zenoh::encoding::Encoding; +/// use zenoh::bytes::Encoding; /// /// let encoding1 = Encoding::from("text/plain;utf-8"); /// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8"); diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index c6ece3f129..50ce79180b 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -21,7 +21,7 @@ use std::{ use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ - core::{key_expr::canon::Canonizable, ExprId, WireExpr}, + core::{key_expr::canon::Canonize, ExprId, WireExpr}, network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -145,7 +145,7 @@ impl<'a> KeyExpr<'a> { pub fn autocanonize(mut t: T) -> Result where Self: TryFrom, - T: Canonizable, + T: Canonize, { t.canonize(); Self::new(t) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e9598a0064..8cf62344f2 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -42,7 +42,7 @@ use super::{ value::Value, }; -/// The [`Queryable`](crate::queryable::Queryable)s that should be 
target of a [`get`](Session::get). +/// The [`Queryable`](crate::query::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3125e90225..f97e5d7541 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -515,8 +515,8 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also allows to create [`Subscriber`](crate::subscriber::Subscriber) and - /// [`Queryable`](crate::queryable::Queryable) with static lifetime that can be moved to several + /// and tasks. It also allows to create [`Subscriber`](crate::pubsub::Subscriber) and + /// [`Queryable`](crate::query::Queryable) with static lifetime that can be moved to several /// threads and tasks /// /// Note: the given zenoh `Session` will be closed when the last reference to @@ -548,7 +548,7 @@ impl Session { /// the program's life. Dropping the returned reference will cause a memory /// leak. /// - /// This is useful to move entities (like [`Subscriber`](crate::subscriber::Subscriber)) which + /// This is useful to move entities (like [`Subscriber`](crate::pubsub::Subscriber)) which /// lifetimes are bound to the session lifetime in several threads or tasks. /// /// Note: the given zenoh `Session` cannot be closed any more. At process @@ -793,7 +793,7 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::{encoding::Encoding, prelude::*}; + /// use zenoh::{bytes::Encoding, prelude::*}; /// /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session @@ -1979,7 +1979,7 @@ impl Session { } impl<'s> SessionDeclarations<'s, 'static> for Arc { - /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. 
+ /// Create a [`Subscriber`](crate::pubsub::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2019,12 +2019,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. + /// Create a [`Queryable`](crate::query::Queryable) for the given key expression. /// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::queryable::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2063,7 +2063,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } } - /// Create a [`Publisher`](crate::publisher::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::pubsub::Publisher) for the given key expression. /// /// # Arguments /// @@ -2620,8 +2620,8 @@ impl fmt::Debug for Session { /// Functions to create zenoh entities /// /// This trait contains functions to create zenoh entities like -/// [`Subscriber`](crate::subscriber::Subscriber), and -/// [`Queryable`](crate::queryable::Queryable) +/// [`Subscriber`](crate::pubsub::Subscriber), and +/// [`Queryable`](crate::query::Queryable) /// /// This trait is implemented by [`Session`](crate::session::Session) itself and /// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](std::sync::Arc) @@ -2644,7 +2644,7 @@ impl fmt::Debug for Session { /// # } /// ``` pub trait SessionDeclarations<'s, 'a> { - /// Create a [`Subscriber`](crate::subscriber::Subscriber) for the given key expression. + /// Create a [`Subscriber`](crate::pubsub::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2675,12 +2675,12 @@ pub trait SessionDeclarations<'s, 'a> { TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. + /// Create a [`Queryable`](crate::query::Queryable) for the given key expression. 
/// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::queryable::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2710,7 +2710,7 @@ pub trait SessionDeclarations<'s, 'a> { TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Publisher`](crate::publisher::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::pubsub::Publisher) for the given key expression. /// /// # Arguments /// @@ -2826,7 +2826,7 @@ impl crate::net::primitives::EPrimitives for Session { /// # #[tokio::main] /// # async fn main() { /// use std::str::FromStr; -/// use zenoh::{info::ZenohId, prelude::*}; +/// use zenoh::{session::ZenohId, prelude::*}; /// /// let mut config = zenoh::config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 1a01ff922d..77db49f525 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -111,34 +111,25 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); +#[allow(deprecated)] +pub use zenoh_core::{AsyncResolve, SyncResolve}; +pub use zenoh_core::{Resolvable, Resolve, Wait}; +/// A zenoh error. +pub use zenoh_result::Error; +/// A zenoh result. +pub use zenoh_result::ZResult as Result; #[doc(inline)] -pub use { - crate::{ - config::Config, - core::{Error, Result}, - scouting::scout, - session::{open, Session}, - }, - zenoh_util::{init_log_from_env_or, try_init_log_from_env}, +pub use zenoh_util::{init_log_from_env_or, try_init_log_from_env}; + +#[doc(inline)] +pub use crate::{ + config::Config, + scouting::scout, + session::{open, Session}, }; pub mod prelude; -/// Zenoh core types -pub mod core { - #[allow(deprecated)] - pub use zenoh_core::{AsyncResolve, SyncResolve}; - pub use zenoh_core::{Resolvable, Resolve, Wait}; - pub use zenoh_result::ErrNo; - /// A zenoh error. 
- pub use zenoh_result::Error; - /// A zenoh result. - pub use zenoh_result::ZResult as Result; - - /// Zenoh message priority - pub use crate::api::publisher::Priority; -} - /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. /// /// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). @@ -180,7 +171,7 @@ pub mod key_expr { } #[zenoh_macros::unstable] pub use zenoh_keyexpr::SetIntersectionLevel; - pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; + pub use zenoh_keyexpr::{canon::Canonize, keyexpr, OwnedKeyExpr}; pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration}; // keyexpr format macro support @@ -196,25 +187,20 @@ pub mod key_expr { /// Zenoh [`Session`] and associated types pub mod session { + #[zenoh_macros::unstable] + pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; + pub use zenoh_protocol::core::EntityId; + #[zenoh_macros::internal] pub use crate::api::session::{init, InitBuilder}; pub use crate::api::{ builders::publisher::{SessionDeleteBuilder, SessionPutBuilder}, + info::{PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder}, query::SessionGetBuilder, session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, }; } -/// Tools to access information about the current zenoh [`Session`]. 
-pub mod info { - pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId}; - pub use zenoh_protocol::core::EntityId; - - pub use crate::api::info::{ - PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder, - }; -} - /// Sample primitives pub mod sample { #[zenoh_macros::unstable] @@ -223,93 +209,64 @@ pub mod sample { pub use crate::api::sample::SourceInfo; pub use crate::api::{ builders::sample::{ - EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderAny, - SampleBuilderDelete, SampleBuilderPut, SampleBuilderTrait, TimestampBuilderTrait, + SampleBuilder, SampleBuilderAny, SampleBuilderDelete, SampleBuilderPut, + SampleBuilderTrait, TimestampBuilderTrait, }, sample::{Sample, SampleFields, SampleKind, SourceSn}, }; } -/// Encoding support -pub mod encoding { - pub use crate::api::encoding::Encoding; -} - /// Payload primitives pub mod bytes { - pub use crate::api::bytes::{ - Deserialize, OptionZBytes, Serialize, ZBytes, ZBytesIterator, ZBytesReader, ZBytesWriter, - ZDeserializeError, ZSerde, + pub use crate::api::{ + builders::sample::EncodingBuilderTrait, + bytes::{ + Deserialize, OptionZBytes, Serialize, ZBytes, ZBytesIterator, ZBytesReader, + ZBytesWriter, ZDeserializeError, ZSerde, + }, + encoding::Encoding, }; } -/// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -pub mod selector { - pub use zenoh_protocol::core::Parameters; - #[zenoh_macros::unstable] - pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - - pub use crate::api::selector::Selector; - #[zenoh_macros::unstable] - pub use crate::api::selector::ZenohParameters; -} - -/// Subscribing primitives -pub mod subscriber { - /// The kind of reliability. 
+/// Pub/sub primitives +pub mod pubsub { pub use zenoh_protocol::core::Reliability; - pub use crate::api::subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder}; -} - -/// Publishing primitives -pub mod publisher { - pub use zenoh_protocol::core::CongestionControl; - - #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingListener; - #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingListenerBuilder; #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingListenerUndeclaration; - #[zenoh_macros::unstable] - pub use crate::api::publisher::MatchingStatus; - #[zenoh_macros::unstable] - pub use crate::api::publisher::PublisherDeclarations; - #[zenoh_macros::unstable] - pub use crate::api::publisher::PublisherRef; + pub use crate::api::publisher::{ + MatchingListener, MatchingListenerBuilder, MatchingListenerUndeclaration, MatchingStatus, + PublisherDeclarations, PublisherRef, + }; pub use crate::api::{ builders::publisher::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, PublisherDeleteBuilder, PublisherPutBuilder, }, publisher::{Publisher, PublisherUndeclaration}, + subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder}, }; } -/// Get operation primitives -pub mod querier { - // Later the `Querier` with `get`` operation will be added here, in addition to `Session::get`, - // similarly to the `Publisher` with `put` operation and `Session::put` -} - -/// Query and Reply primitives +/// Query/reply primitives pub mod query { + pub use zenoh_protocol::core::Parameters; #[zenoh_macros::unstable] - pub use crate::api::query::ReplyKeyExpr; + pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + #[zenoh_macros::internal] pub use crate::api::queryable::ReplySample; + #[zenoh_macros::unstable] + pub use crate::api::{query::ReplyKeyExpr, selector::ZenohParameters}; pub use crate::api::{ query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply, ReplyError}, - 
queryable::{Query, ReplyBuilder, ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder}, + queryable::{ + Query, Queryable, QueryableBuilder, QueryableUndeclaration, ReplyBuilder, + ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder, + }, + selector::Selector, }; } -/// Queryable primitives -pub mod queryable { - pub use crate::api::queryable::{Queryable, QueryableBuilder, QueryableUndeclaration}; -} - /// Callback handler trait pub mod handlers { pub use crate::api::handlers::{ @@ -318,6 +275,13 @@ pub mod handlers { }; } +/// Quality of service primitives +pub mod qos { + pub use zenoh_protocol::core::CongestionControl; + + pub use crate::api::{builders::sample::QoSBuilderTrait, publisher::Priority}; +} + /// Scouting primitives pub mod scouting { pub use zenoh_config::wrappers::Hello; @@ -455,6 +419,8 @@ pub mod internal { }; } + pub use zenoh_result::ErrNo; + pub use crate::api::value::Value; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index eb010f9037..26807e8907 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -50,7 +50,7 @@ use crate::{ queryable::{Query, QueryInner}, value::Value, }, - encoding::Encoding, + bytes::Encoding, net::primitives::Primitives, }; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 63cb397e38..373d56c65a 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -37,29 +37,29 @@ mod _prelude { session::{SessionDeclarations, Undeclarable}, }, config::ValidatedMap, - core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, + Error as ZError, Resolvable, Resolve, Result as ZResult, }; } pub use _prelude::*; #[allow(deprecated)] -pub use crate::core::AsyncResolve; +pub use crate::AsyncResolve; #[allow(deprecated)] -pub use crate::core::SyncResolve; -pub use crate::core::Wait; +pub use crate::SyncResolve; +pub use crate::Wait; /// Prelude to import when using Zenoh's sync API. 
#[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod sync { pub use super::_prelude::*; #[allow(deprecated)] - pub use crate::core::SyncResolve; + pub use crate::SyncResolve; } /// Prelude to import when using Zenoh's async API. #[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod r#async { pub use super::_prelude::*; #[allow(deprecated)] - pub use crate::core::AsyncResolve; + pub use crate::AsyncResolve; } diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 77e7e43a10..8d7d7e7322 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,7 +13,11 @@ // use std::time::Duration; -use zenoh::{core::Priority, encoding::Encoding, prelude::*, publisher::CongestionControl}; +use zenoh::{ + bytes::Encoding, + prelude::*, + qos::{CongestionControl, Priority}, +}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 7f61f459d6..2256455be5 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -24,7 +24,7 @@ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::{ config::{ModeDependentValue, WhatAmI, WhatAmIMatcher}, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, Config, Result, Session, }; use zenoh_core::ztimeout; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 25adaf42e0..859ff43f7d 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -22,8 +22,8 @@ use std::{ #[cfg(feature = "internal")] use zenoh::internal::runtime::{Runtime, RuntimeBuilder}; use zenoh::{ - config, key_expr::KeyExpr, prelude::*, publisher::CongestionControl, sample::SampleKind, - subscriber::Reliability, Session, + config, key_expr::KeyExpr, prelude::*, pubsub::Reliability, qos::CongestionControl, + sample::SampleKind, Session, }; use zenoh_core::ztimeout; diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 43205e8e47..33665913ed 100644 --- 
a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -23,11 +23,11 @@ use std::{ use zenoh::{ config, prelude::*, - publisher::CongestionControl, + pubsub::Reliability, + qos::CongestionControl, shm::{ BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, }, - subscriber::Reliability, Session, }; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 6ce01ff2bf..a89ddb4b04 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -25,7 +25,7 @@ use zenoh::{ config::{EndPoint, WhatAmI}, key_expr::KeyExpr, prelude::*, - publisher::CongestionControl, + qos::CongestionControl, Session, }; use zenoh_core::ztimeout; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 81ca715f44..e69dd3d263 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -19,7 +19,7 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilte use url::Url; use zenoh::{ config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap, WhatAmI}, - core::Result, + Result, }; #[cfg(feature = "loki")] From e021d0de69e589e06c4ec8f328097610ce52a44d Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Wed, 3 Jul 2024 16:15:06 +0800 Subject: [PATCH 507/598] Add z_bytes in the examples. (#1180) * Add z_bytes in the examples. Signed-off-by: ChenYing Kuo * Add encoding information in examples. Signed-off-by: ChenYing Kuo * Update the format in examples README. Signed-off-by: ChenYing Kuo * Add z_bytes description in README. Signed-off-by: ChenYing Kuo * Update comments in other examples to point to z_bytes.rs. Signed-off-by: ChenYing Kuo * Support JSON, YAML, Protobuf in z_bytes.rs. Signed-off-by: ChenYing Kuo * Fix lint issues. Signed-off-by: ChenYing Kuo * Use Cow instead of Vec. 
Signed-off-by: ChenYing Kuo --------- Signed-off-by: ChenYing Kuo --- Cargo.lock | 44 +++++++++- examples/Cargo.toml | 4 + examples/README.md | 49 ++++++++++- examples/examples/z_bytes.rs | 143 +++++++++++++++++++++++++++++++ examples/examples/z_get.rs | 2 + examples/examples/z_pub.rs | 1 + examples/examples/z_put.rs | 1 + examples/examples/z_queryable.rs | 2 + examples/examples/z_sub.rs | 1 + 9 files changed, 240 insertions(+), 7 deletions(-) create mode 100644 examples/examples/z_bytes.rs diff --git a/Cargo.lock b/Cargo.lock index b8a03280c2..5344e0135c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2200,8 +2200,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f56d36f573486ba7f462b62cbae597fef7d5d93665e7047956b457531b8a1ced" dependencies = [ - "prost", - "prost-types", + "prost 0.11.9", + "prost-types 0.11.9", ] [[package]] @@ -2972,7 +2972,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.11.9", +] + +[[package]] +name = "prost" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive 0.12.6", ] [[package]] @@ -2988,13 +2998,35 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "prost-types" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "prost", + "prost 0.11.9", +] + 
+[[package]] +name = "prost-types" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +dependencies = [ + "prost 0.12.6", ] [[package]] @@ -5497,8 +5529,12 @@ dependencies = [ "futures", "git-version", "json5", + "prost 0.12.6", + "prost-types 0.12.6", "rand 0.8.5", "rustc_version 0.4.0", + "serde_json", + "serde_yaml", "tokio", "tracing", "zenoh", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 90281ae558..e8cda2ae27 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -43,6 +43,10 @@ zenoh-collections = { workspace = true } tracing = { workspace = true } zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +prost = "0.12.6" +prost-types = "0.12.6" [dev-dependencies] rand = { workspace = true, features = ["default"] } diff --git a/examples/README.md b/examples/README.md index 7776561ef8..d187244c51 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,6 +3,7 @@ ## Start instructions When Zenoh is built in release mode: + ```bash ./target/release/example/ ``` @@ -20,6 +21,7 @@ Scouts for Zenoh peers and routers available on the network. Typical usage: + ```bash z_scout ``` @@ -29,11 +31,11 @@ Gets information about the Zenoh session. Typical usage: + ```bash z_info ``` - ### z_put Puts a path/value into Zenoh. @@ -41,10 +43,13 @@ and [z_storage](#z_storage) examples. Typical usage: + ```bash z_put ``` + or + ```bash z_put -k demo/example/test -v 'Hello World' ``` @@ -55,10 +60,13 @@ The published value will be received by all matching subscribers, for instance the [z_sub](#z_sub) and [z_storage](#z_storage) examples. 
Typical usage: + ```bash z_pub ``` + or + ```bash z_pub -k demo/example/test -v 'Hello World' ``` @@ -69,10 +77,13 @@ The subscriber will be notified of each `put` or `delete` made on any key expression matching the subscriber key expression, and will print this notification. Typical usage: + ```bash z_sub ``` + or + ```bash z_sub -k 'demo/**' ``` @@ -82,14 +93,16 @@ Declares a key expression and a pull subscriber. On each pull, the pull subscriber will be notified of the last N `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. - Typical usage: + ```bash z_pull ``` + or + ```bash - z_pull -k demo/** --size 3 + z_pull -k demo/** --size 3 ``` ### z_get @@ -99,10 +112,13 @@ will receive this query and reply with paths/values that will be received by the receiver stream. Typical usage: + ```bash z_get ``` + or + ```bash z_get -s 'demo/**' ``` @@ -114,10 +130,13 @@ with a selector that matches the path, and will return a value to the querier. Typical usage: + ```bash z_queryable ``` + or + ```bash z_queryable -k demo/example/queryable -v 'This is the result' ``` @@ -131,10 +150,13 @@ and that match the queried selector. Typical usage: + ```bash z_storage ``` + or + ```bash z_storage -k 'demo/**' ``` @@ -145,11 +167,13 @@ Note that on subscriber side, the same `z_sub` example than for non-shared-memory example is used. Typical Subscriber usage: + ```bash z_sub ``` Typical Publisher usage: + ```bash z_pub_shm ``` @@ -161,11 +185,13 @@ put operations and a subscriber receiving notifications of those puts. Typical Subscriber usage: + ```bash z_sub_thr ``` Typical Publisher usage: + ```bash z_pub_thr 1024 ``` @@ -182,11 +208,13 @@ :warning: z_pong needs to start first to avoid missing the kickoff from z_ping. 
Typical Pong usage: + ```bash z_pong ``` Typical Ping usage: + ```bash z_ping 1024 ``` @@ -200,11 +228,13 @@ Note that on subscriber side, the same `z_sub_thr` example than for non-shared-memory example is used. Typical Subscriber usage: + ```bash z_sub_thr ``` Typical Publisher usage: + ```bash z_pub_shm_thr ``` @@ -217,10 +247,13 @@ or killing the `z_liveliness` example. Typical usage: + ```bash z_liveliness ``` + or + ```bash z_liveliness -k 'group1/member1' ``` @@ -231,10 +264,13 @@ (`group1/**` by default). Those tokens could be declared by the `z_liveliness` example. Typical usage: + ```bash z_get_liveliness ``` + or + ```bash z_get_liveliness -k 'group1/**' ``` @@ -249,10 +285,17 @@ matching liveliness tokens that were alive before it's start. Typical usage: + ```bash z_sub_liveliness ``` + or + ```bash z_sub_liveliness -k 'group1/**' ``` + +### z_bytes + + Show how to serialize different message types into ZBytes, and then deserialize from ZBytes to the original message types. diff --git a/examples/examples/z_bytes.rs b/examples/examples/z_bytes.rs new file mode 100644 index 0000000000..ac4a2cc94a --- /dev/null +++ b/examples/examples/z_bytes.rs @@ -0,0 +1,143 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{borrow::Cow, collections::HashMap, io::Cursor}; + +use zenoh::bytes::ZBytes; + +fn main() { + // Numeric: u8, u16, u32, u128, usize, i8, i16, i32, i128, isize, f32, f64 + let input = 1234_u32; + let payload = ZBytes::from(input); + let output: u32 = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::ZENOH_UINT32; + + // String + let input = String::from("test"); + let payload = ZBytes::from(&input); + let output: String = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::ZENOH_STRING; + + // Cow + let input = Cow::from("test"); + let payload = ZBytes::from(&input); + let output: Cow = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::ZENOH_STRING; + + // Vec: The deserialization should be infallible + let input: Vec = vec![1, 2, 3, 4]; + let payload = ZBytes::from(&input); + let output: Vec = payload.into(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. 
+ // let encoding = Encoding::ZENOH_BYTES; + + // Writer & Reader + // serialization + let mut bytes = ZBytes::empty(); + let mut writer = bytes.writer(); + let i1 = 1234_u32; + let i2 = String::from("test"); + let i3 = vec![1, 2, 3, 4]; + writer.serialize(i1); + writer.serialize(&i2); + writer.serialize(&i3); + // deserialization + let mut reader = bytes.reader(); + let o1: u32 = reader.deserialize().unwrap(); + let o2: String = reader.deserialize().unwrap(); + let o3: Vec = reader.deserialize().unwrap(); + assert_eq!(i1, o1); + assert_eq!(i2, o2); + assert_eq!(i3, o3); + + // Tuple + let input = (1234_u32, String::from("test")); + let payload = ZBytes::serialize(input.clone()); + let output: (u32, String) = payload.deserialize().unwrap(); + assert_eq!(input, output); + + // Iterator + let input: [i32; 4] = [1, 2, 3, 4]; + let payload = ZBytes::from_iter(input.iter()); + for (idx, value) in payload.iter::().enumerate() { + assert_eq!(input[idx], value.unwrap()); + } + + // HashMap + let mut input: HashMap = HashMap::new(); + input.insert(0, String::from("abc")); + input.insert(1, String::from("def")); + let payload = ZBytes::from(input.clone()); + let output = payload.deserialize::>().unwrap(); + assert_eq!(input, output); + + // JSON + let data = r#" + { + "name": "John Doe", + "age": 43, + "phones": [ + "+44 1234567", + "+44 2345678" + ] + }"#; + let input: serde_json::Value = serde_json::from_str(data).unwrap(); + let payload = ZBytes::try_serialize(input.clone()).unwrap(); + let output: serde_json::Value = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. 
+ // let encoding = Encoding::APPLICATION_JSON; + + // YAML + let data = r#" + name: "John Doe" + age: 43 + phones: + - "+44 1234567" + - "+44 2345678" + "#; + let input: serde_yaml::Value = serde_yaml::from_str(data).unwrap(); + let payload = ZBytes::try_serialize(input.clone()).unwrap(); + let output: serde_yaml::Value = payload.deserialize().unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::APPLICATION_YAML; + + // Protobuf + use prost::Message; + #[derive(Message, Eq, PartialEq)] + struct EntityInfo { + #[prost(uint32)] + id: u32, + #[prost(string)] + name: String, + } + let input = EntityInfo { + id: 1234, + name: String::from("John Doe"), + }; + let payload = ZBytes::from(input.encode_to_vec()); + let output = + EntityInfo::decode(Cursor::new(payload.deserialize::>().unwrap())).unwrap(); + assert_eq!(input, output); + // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc. + // let encoding = Encoding::APPLICATION_PROTOBUF; +} diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index eebe582f98..a83eeb5034 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -37,6 +37,7 @@ async fn main() { // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. 
// .with(zenoh::handlers::RingChannel::default()) + // Refer to z_bytes.rs to see how to serialize different types of message .payload(payload.unwrap_or_default()) .target(target) .timeout(timeout) @@ -45,6 +46,7 @@ async fn main() { while let Ok(reply) = replies.recv_async().await { match reply.result() { Ok(sample) => { + // Refer to z_bytes.rs to see how to deserialize different types of message let payload = sample .payload() .deserialize::() diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 9f84ba118f..4ff177c32a 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -35,6 +35,7 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {payload}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); + // Refer to z_bytes.rs to see how to serialize different types of message publisher .put(buf) .encoding(Encoding::TEXT_PLAIN) // Optionally set the encoding metadata diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index bc4dd88eed..0097f99139 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -26,6 +26,7 @@ async fn main() { let session = zenoh::open(config).await.unwrap(); println!("Putting Data ('{key_expr}': '{payload}')..."); + // Refer to z_bytes.rs to see how to serialize different types of message session.put(&key_expr, payload).await.unwrap(); } diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 7857c8caff..d6c5c7ea46 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -46,6 +46,7 @@ async fn main() { match query.payload() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), Some(query_payload) => { + // Refer to z_bytes.rs to see how to deserialize different types of message let deserialized_payload = query_payload .deserialize::() .unwrap_or_else(|e| format!("{}", e)); @@ -61,6 +62,7 @@ async fn main() { 
key_expr.as_str(), payload, ); + // Refer to z_bytes.rs to see how to serialize different types of message query .reply(key_expr.clone(), payload.clone()) .await diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 8ecc4b9818..690a211119 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -35,6 +35,7 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { + // Refer to z_bytes.rs to see how to deserialize different types of message let payload = sample .payload() .deserialize::() From 76a1d18791606475acd617917becd8237a706477 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 3 Jul 2024 16:39:08 +0200 Subject: [PATCH 508/598] Use TCP MSS as TCP link MTU --- Cargo.lock | 2 + io/zenoh-links/zenoh-link-tcp/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 20 ++++++++- io/zenoh-links/zenoh-link-tls/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tls/src/unicast.rs | 41 ++++++++++++++----- io/zenoh-transport/src/common/batch.rs | 10 +---- io/zenoh-transport/src/multicast/link.rs | 6 +-- .../src/unicast/establishment/accept.rs | 2 +- io/zenoh-transport/src/unicast/link.rs | 4 +- .../src/unicast/lowlatency/link.rs | 6 +-- .../src/unicast/universal/link.rs | 2 +- .../tests/unicast_compression.rs | 2 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- 13 files changed, 62 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5344e0135c..1f8b8bac9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5695,6 +5695,7 @@ name = "zenoh-link-tcp" version = "0.11.0-dev" dependencies = [ "async-trait", + "socket2 0.5.6", "tokio", "tokio-util", "tracing", @@ -5719,6 +5720,7 @@ dependencies = [ "rustls-pki-types", "rustls-webpki", "secrecy", + "socket2 0.5.6", "tokio", "tokio-rustls", "tokio-util", diff --git a/io/zenoh-links/zenoh-link-tcp/Cargo.toml b/io/zenoh-links/zenoh-link-tcp/Cargo.toml index ca94412382..4a501f61ed 100644 --- 
a/io/zenoh-links/zenoh-link-tcp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tcp/Cargo.toml @@ -26,6 +26,7 @@ description = "Internal crate for zenoh." [dependencies] async-trait = { workspace = true } +socket2 = { workspace = true } tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } tokio-util = { workspace = true, features = ["rt"] } tracing = {workspace = true} diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 5c4d086c5b..df5a3bdae4 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -80,6 +80,12 @@ impl LinkUnicastTcp { dst_locator: Locator::new(TCP_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), } } + + #[allow(clippy::mut_from_ref)] + fn get_socket(&self) -> &TcpStream { + unsafe { &*self.socket.get() } + } + #[allow(clippy::mut_from_ref)] fn get_mut_socket(&self) -> &mut TcpStream { unsafe { &mut *self.socket.get() } @@ -147,7 +153,18 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { - *TCP_DEFAULT_MTU + // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(not(target_os = "redox"))] + { + let socket = socket2::SockRef::from(self.get_socket()); + let mss = socket.mss().unwrap_or(*TCP_DEFAULT_MTU as u32); + mss.min(*TCP_DEFAULT_MTU as u32) as BatchSize + } + + #[cfg(target_os = "redox")] + { + *TCP_DEFAULT_MTU + } } #[inline(always)] @@ -195,6 +212,7 @@ impl fmt::Debug for LinkUnicastTcp { f.debug_struct("Tcp") .field("src", &self.src_addr) .field("dst", &self.dst_addr) + .field("mtu", &self.get_mtu()) .finish() } } diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index e0f1c6b03d..a716c72c99 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -33,6 +33,7 @@ rustls-pemfile = { workspace = true } rustls-pki-types = { 
workspace = true } rustls-webpki = { workspace = true } secrecy = { workspace = true } +socket2 = { workspace = true } tokio = { workspace = true, features = ["fs", "io-util", "net", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 41847a1577..2f2d2431a1 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -109,11 +109,15 @@ impl LinkUnicastTls { } } + fn get_socket(&self) -> &TlsStream { + unsafe { &*self.inner.get() } + } + // NOTE: It is safe to suppress Clippy warning since no concurrent reads // or concurrent writes will ever happen. The read_mtx and write_mtx // are respectively acquired in any read and write operation. #[allow(clippy::mut_from_ref)] - fn get_sock_mut(&self) -> &mut TlsStream { + fn get_mut_socket(&self) -> &mut TlsStream { unsafe { &mut *self.inner.get() } } } @@ -124,7 +128,7 @@ impl LinkUnicastTrait for LinkUnicastTls { tracing::trace!("Closing TLS link: {}", self); // Flush the TLS stream let _guard = zasynclock!(self.write_mtx); - let tls_stream = self.get_sock_mut(); + let tls_stream = self.get_mut_socket(); let res = tls_stream.flush().await; tracing::trace!("TLS link flush {}: {:?}", self, res); // Close the underlying TCP stream @@ -136,7 +140,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write(&self, buffer: &[u8]) -> ZResult { let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write(buffer).await.map_err(|e| { + self.get_mut_socket().write(buffer).await.map_err(|e| { tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -144,7 +148,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write_all(buffer).await.map_err(|e| { + 
self.get_mut_socket().write_all(buffer).await.map_err(|e| { tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -152,7 +156,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read(&self, buffer: &mut [u8]) -> ZResult { let _guard = zasynclock!(self.read_mtx); - self.get_sock_mut().read(buffer).await.map_err(|e| { + self.get_mut_socket().read(buffer).await.map_err(|e| { tracing::trace!("Read error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -160,10 +164,14 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { let _guard = zasynclock!(self.read_mtx); - let _ = self.get_sock_mut().read_exact(buffer).await.map_err(|e| { - tracing::trace!("Read error on TLS link {}: {}", self, e); - zerror!(e) - })?; + let _ = self + .get_mut_socket() + .read_exact(buffer) + .await + .map_err(|e| { + tracing::trace!("Read error on TLS link {}: {}", self, e); + zerror!(e) + })?; Ok(()) } @@ -179,7 +187,18 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { - *TLS_DEFAULT_MTU + // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(not(target_os = "redox"))] + { + let socket = socket2::SockRef::from(self.get_socket().get_ref().0); + let mss = socket.mss().unwrap_or(*TLS_DEFAULT_MTU as u32); + mss.min(*TLS_DEFAULT_MTU as u32) as BatchSize + } + + #[cfg(target_os = "redox")] + { + *TLS_DEFAULT_MTU + } } #[inline(always)] @@ -206,7 +225,7 @@ impl LinkUnicastTrait for LinkUnicastTls { impl Drop for LinkUnicastTls { fn drop(&mut self) { // Close the underlying TCP stream - let (tcp_stream, _) = self.get_sock_mut().get_mut(); + let (tcp_stream, _) = self.get_mut_socket().get_mut(); let _ = zenoh_runtime::ZRuntime::Acceptor .block_in_place(async move { tcp_stream.shutdown().await }); } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 
c36993ddf7..94b03b0514 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -120,14 +120,6 @@ impl BatchConfig { .then_some(BatchHeader::new(BatchHeader::COMPRESSION)) } } - - pub fn max_buffer_size(&self) -> usize { - let mut len = self.mtu as usize; - if self.is_streamed { - len += BatchSize::BITS as usize / 8; - } - len - } } // Batch header @@ -214,7 +206,7 @@ pub struct WBatch { impl WBatch { pub fn new(config: BatchConfig) -> Self { let mut batch = Self { - buffer: BBuf::with_capacity(config.max_buffer_size()), + buffer: BBuf::with_capacity(config.mtu as usize), codec: Zenoh080Batch::new(), config, #[cfg(feature = "stats")] diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 9c2bdbe1f1..90999d32ce 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -73,9 +73,7 @@ impl TransportLinkMulticast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size( - self.config.batch.max_buffer_size() - ), + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), )), None ), @@ -551,7 +549,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.inner.config.batch.max_buffer_size(); + let mtu = link.inner.config.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index f3a053aa63..3f71d7b6da 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -769,7 +769,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - .await?; tracing::debug!( - "New transport link accepted from {} to {}: {}.", + "New transport link accepted from {} to {}: {}", osyn_out.other_zid, manager.config.zid, s_link, diff 
--git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index e43f4d3813..736360db63 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -67,9 +67,7 @@ impl TransportLinkUnicast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size( - self.config.batch.max_buffer_size() - ), + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), )), None ), diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 250850726f..3ba1cd724f 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -152,11 +152,7 @@ impl TransportUnicastLowlatency { // The pool of buffers let pool = { - let mtu = if is_streamed { - link_rx.batch.mtu as usize - } else { - link_rx.batch.max_buffer_size() - }; + let mtu = link_rx.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 9655d0964d..cc3afc06e5 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -248,7 +248,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.batch.max_buffer_size(); + let mtu = link.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 7c2443c5d9..e5015c3d25 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -51,8 +51,8 @@ mod tests { const MSG_COUNT: usize = 1_000; const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; - const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; const MSG_SIZE_NOFRAG: [usize; 1] 
= [1_024]; + const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // Transport Handler for the router struct SHRouter { diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index a0fabe1ffd..1c5d749b59 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -232,13 +232,13 @@ const SLEEP_COUNT: Duration = Duration::from_millis(10); const MSG_COUNT: usize = 1_000; const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; -const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; #[cfg(any( feature = "transport_tcp", feature = "transport_udp", feature = "transport_unixsock-stream", ))] const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; +const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // Transport Handler for the router struct SHRouter { From feb25ec0f5de0a596b3502008b692071c6eb01e0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 3 Jul 2024 16:55:58 +0200 Subject: [PATCH 509/598] Compute default TCP MSS if not supported --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 13 +++++++++---- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 13 +++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index df5a3bdae4..e0690dff16 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -153,17 +153,22 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(not(target_os = "redox"))] + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket()); let mss = socket.mss().unwrap_or(*TCP_DEFAULT_MTU as u32); 
mss.min(*TCP_DEFAULT_MTU as u32) as BatchSize } - #[cfg(target_os = "redox")] + #[cfg(not(target_family = "unix"))] { - *TCP_DEFAULT_MTU + // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + *TCP_DEFAULT_MTU - header } } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 2f2d2431a1..933461b47e 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -187,17 +187,22 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // target_os limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(not(target_os = "redox"))] + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket().get_ref().0); let mss = socket.mss().unwrap_or(*TLS_DEFAULT_MTU as u32); mss.min(*TLS_DEFAULT_MTU as u32) as BatchSize } - #[cfg(target_os = "redox")] + #[cfg(not(target_family = "unix"))] { - *TLS_DEFAULT_MTU + // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + *TLS_DEFAULT_MTU - header } } From 770b707925ed70e381ba9d8e40858e0bb3c8cd6c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 3 Jul 2024 17:04:00 +0200 Subject: [PATCH 510/598] Fix clippy --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 2 +- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index e0690dff16..e1682604df 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ 
b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -81,7 +81,7 @@ impl LinkUnicastTcp { } } - #[allow(clippy::mut_from_ref)] + #[cfg(target_family = "unix")] fn get_socket(&self) -> &TcpStream { unsafe { &*self.socket.get() } } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 933461b47e..bc90ba0983 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -109,6 +109,7 @@ impl LinkUnicastTls { } } + #[cfg(target_family = "unix")] fn get_socket(&self) -> &TlsStream { unsafe { &*self.inner.get() } } From 78ffa1085c7e08395f462d2f9e59214de154883b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Jul 2024 14:23:56 +0200 Subject: [PATCH 511/598] Compute largest tcp mss lesser than max batch size --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 27 ++++++++++++-------- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 27 ++++++++++++-------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index e1682604df..6f5bf96ee9 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -153,23 +153,28 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TCP_DEFAULT_MTU - header; + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket()); - let mss = socket.mss().unwrap_or(*TCP_DEFAULT_MTU as u32); - mss.min(*TCP_DEFAULT_MTU as u32) as BatchSize + let mss =
socket.mss().unwrap_or(mtu as u32); + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; } - #[cfg(not(target_family = "unix"))] - { - // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - *TCP_DEFAULT_MTU - header - } + mtu } #[inline(always)] diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index bc90ba0983..62bf49e611 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -188,23 +188,28 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match self.src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut, assign)] // mut is not needed when target_family != unix + let mut mtu = *TLS_DEFAULT_MTU - header; + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket().get_ref().0); - let mss = socket.mss().unwrap_or(*TLS_DEFAULT_MTU as u32); - mss.min(*TLS_DEFAULT_MTU as u32) as BatchSize + let mss = socket.mss().unwrap_or(mtu as u32); + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; } - #[cfg(not(target_family = "unix"))] - { - // See IETF RFC6691 https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - *TLS_DEFAULT_MTU - header - } + mtu } #[inline(always)] From 
a5195af7edc12bed5e876bbac3e2f55503dee1db Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Jul 2024 14:26:36 +0200 Subject: [PATCH 512/598] Fix clippy --- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 62bf49e611..74e3ae8341 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -193,7 +193,7 @@ impl LinkUnicastTrait for LinkUnicastTls { std::net::IpAddr::V4(_) => 40, std::net::IpAddr::V6(_) => 60, }; - #[allow(unused_mut, assign)] // mut is not needed when target_family != unix + #[allow(unused_mut)] // mut is not needed when target_family != unix let mut mtu = *TLS_DEFAULT_MTU - header; // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 From 5850c946fbf351928f76b24a25566724c8ff70ad Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Jul 2024 17:48:00 +0200 Subject: [PATCH 513/598] Consider MSS/2 in TCP MTU computation --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 3 ++- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 6f5bf96ee9..99d2d44c36 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -165,7 +165,8 @@ impl LinkUnicastTrait for LinkUnicastTcp { #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket()); - let mss = socket.mss().unwrap_or(mtu as u32); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; // Compute largest multiple of TCP MSS that is smaller of default MTU let mut tgt = mss; while (tgt + mss) < mtu as u32 { diff --git 
a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 74e3ae8341..4ab21d9993 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -200,7 +200,8 @@ impl LinkUnicastTrait for LinkUnicastTls { #[cfg(target_family = "unix")] { let socket = socket2::SockRef::from(self.get_socket().get_ref().0); - let mss = socket.mss().unwrap_or(mtu as u32); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; // Compute largest multiple of TCP MSS that is smaller of default MTU let mut tgt = mss; while (tgt + mss) < mtu as u32 { From bef94d19a901f0f85c42947058271a6a59858582 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 4 Jul 2024 17:52:50 +0200 Subject: [PATCH 514/598] Make listen and connect endpoints ModeDependentValues (#1216) * Make listen and connect endpoints ModeDependentValues * Improve DEFAULT_CONFIG doc * Fix doctests --- DEFAULT_CONFIG.json5 | 24 +++-- commons/zenoh-config/src/defaults.rs | 28 ++++++ commons/zenoh-config/src/lib.rs | 18 ++-- commons/zenoh-config/src/mode_dependent.rs | 90 +++++++++++++++---- commons/zenoh-protocol/src/core/whatami.rs | 3 +- examples/src/lib.rs | 12 ++- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- zenoh-ext/examples/src/lib.rs | 12 ++- zenoh-ext/tests/liveliness.rs | 36 +++++--- zenoh/src/api/session.rs | 3 +- zenoh/src/net/runtime/orchestrator.rs | 81 ++++++++--------- zenoh/tests/acl.rs | 6 +- zenoh/tests/authentication.rs | 18 +++- zenoh/tests/connection_retry.rs | 10 ++- zenoh/tests/events.rs | 28 ++++-- zenoh/tests/interceptors.rs | 12 ++- zenoh/tests/liveliness.rs | 30 ++++--- zenoh/tests/matching.rs | 3 +- zenoh/tests/open_time.rs | 6 +- zenoh/tests/routing.rs | 24 ++--- zenoh/tests/session.rs | 68 ++++++++++---- zenoh/tests/shm.rs | 40 ++++++--- zenoh/tests/unicity.rs | 36 ++++++-- zenohd/src/main.rs | 14 +-- 24 files changed, 427 
insertions(+), 181 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 6906d15cf5..1e9921bbe3 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -23,9 +23,13 @@ /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 connect: { /// timeout waiting for all endpoints connected (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. timeout_ms: 0) + /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: { router: -1, peer: -1, client: 0 }, + /// The list of endpoints to connect to. + /// Accepts a single list (e.g. endpoints: ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"]) + /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/10.10.10.10:7447"], peer: ["tcp/11.11.11.11:7447"] }). endpoints: [ // "/

" ], @@ -49,19 +53,21 @@ }, }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// Which endpoints to listen on. E.g. tcp/0.0.0.0:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 listen: { /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. timeout_ms: 0) + /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: 0, - endpoints: [ - // "/
" - ], + /// The list of endpoints to listen on. + /// Accepts a single list (e.g. endpoints: ["tcp/[::]:7447", "udp/[::]:7447"]) + /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }). + endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }, /// Global listen configuration, /// Accepts a single value or different values for router, peer and client. @@ -98,7 +104,8 @@ /// The time-to-live on multicast scouting packets ttl: 1, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. autoconnect: "router|peer") + /// or different values for router, peer and client (e.g. autoconnect: { router: "", peer: "router|peer" }). /// Each value is bit-or-like combinations of "peer", "router" and "client". autoconnect: { router: "", peer: "router|peer" }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. @@ -115,7 +122,8 @@ /// direct connectivity with each other. multihop: false, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. autoconnect: "router|peer") + /// or different values for router, peer and client (e.g. autoconnect: { router: "", peer: "router|peer" }). /// Each value is bit-or-like combinations of "peer", "router" and "client". 
autoconnect: { router: "", peer: "router|peer" }, }, diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index a6be460bcb..bbb03a7eff 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -100,6 +100,34 @@ pub mod routing { } } +impl Default for ListenConfig { + #[allow(clippy::unnecessary_cast)] + fn default() -> Self { + Self { + timeout_ms: None, + endpoints: ModeDependentValue::Dependent(ModeValues { + router: Some(vec!["tcp/[::]:7447".parse().unwrap()]), + peer: Some(vec!["tcp/[::]:0".parse().unwrap()]), + client: None, + }), + exit_on_failure: None, + retry: None, + } + } +} + +impl Default for ConnectConfig { + #[allow(clippy::unnecessary_cast)] + fn default() -> Self { + Self { + timeout_ms: None, + endpoints: ModeDependentValue::Unique(vec![]), + exit_on_failure: None, + retry: None, + } + } +} + impl Default for TransportUnicastConf { fn default() -> Self { Self { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 51dce4ffb4..e239ac8b7a 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -185,10 +185,8 @@ pub fn peer() -> Config { pub fn client, T: Into>(peers: I) -> Config { let mut config = Config::default(); config.set_mode(Some(WhatAmI::Client)).unwrap(); - config - .connect - .endpoints - .extend(peers.into_iter().map(|t| t.into())); + config.connect.endpoints = + ModeDependentValue::Unique(peers.into_iter().map(|t| t.into()).collect()); config } @@ -227,21 +225,23 @@ validated_struct::validator! { /// The node's mode ("router" (default value in `zenohd`), "peer" or "client"). mode: Option, /// Which zenoh nodes to connect to. 
- pub connect: #[derive(Default)] + pub connect: ConnectConfig { /// global timeout for full connect cycle pub timeout_ms: Option>, - pub endpoints: Vec, + /// The list of endpoints to connect to + pub endpoints: ModeDependentValue>, /// if connection timeout exceed, exit from application pub exit_on_failure: Option>, pub retry: Option, }, - /// Which endpoints to listen on. `zenohd` will add `tcp/[::]:7447` to these locators if left empty. - pub listen: #[derive(Default)] + /// Which endpoints to listen on. + pub listen: ListenConfig { /// global timeout for full listen cycle pub timeout_ms: Option>, - pub endpoints: Vec, + /// The list of endpoints to listen on + pub endpoints: ModeDependentValue>, /// if connection timeout exceed, exit from application pub exit_on_failure: Option>, pub retry: Option, diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 7c331c8318..6a06f967ba 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -18,7 +18,7 @@ use serde::{ de::{self, MapAccess, Visitor}, Deserialize, Serialize, }; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor}; +use zenoh_protocol::core::{EndPoint, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor}; pub trait ModeDependent { fn router(&self) -> Option<&T>; @@ -32,6 +32,7 @@ pub trait ModeDependent { WhatAmI::Client => self.client(), } } + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T>; } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -59,6 +60,15 @@ impl ModeDependent for ModeValues { fn client(&self) -> Option<&T> { self.client.as_ref() } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + match whatami { + WhatAmI::Router => self.router.as_mut(), + WhatAmI::Peer => self.peer.as_mut(), + WhatAmI::Client => self.client.as_mut(), + } + } } #[derive(Clone, Debug)] @@ -67,6 +77,15 @@ pub enum ModeDependentValue { Dependent(ModeValues), } +impl 
ModeDependentValue { + #[inline] + pub fn set(&mut self, value: T) -> Result, ModeDependentValue> { + let mut value = ModeDependentValue::Unique(value); + std::mem::swap(self, &mut value); + Ok(value) + } +} + impl ModeDependent for ModeDependentValue { #[inline] fn router(&self) -> Option<&T> { @@ -91,6 +110,14 @@ impl ModeDependent for ModeDependentValue { Self::Dependent(o) => o.client(), } } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + match self { + Self::Unique(v) => Some(v), + Self::Dependent(o) => o.get_mut(whatami), + } + } } impl serde::Serialize for ModeDependentValue @@ -249,31 +276,62 @@ impl<'a> serde::Deserialize<'a> for ModeDependentValue { } } +impl<'a> serde::Deserialize<'a> for ModeDependentValue> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'a>, + { + struct UniqueOrDependent(PhantomData U>); + + impl<'de> Visitor<'de> for UniqueOrDependent>> { + type Value = ModeDependentValue>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("list of endpoints or mode dependent list of endpoints") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: de::SeqAccess<'de>, + { + let mut v = seq.size_hint().map_or_else(Vec::new, Vec::with_capacity); + + while let Some(s) = seq.next_element()? 
{ + v.push(s); + } + Ok(ModeDependentValue::Unique(v)) + } + + fn visit_map(self, map: M) -> Result + where + M: MapAccess<'de>, + { + ModeValues::deserialize(de::value::MapAccessDeserializer::new(map)) + .map(ModeDependentValue::Dependent) + } + } + deserializer.deserialize_any(UniqueOrDependent(PhantomData)) + } +} + impl ModeDependent for Option> { #[inline] fn router(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.router(), - None => None, - } + self.as_ref().and_then(|m| m.router()) } #[inline] fn peer(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.peer(), - None => None, - } + self.as_ref().and_then(|m| m.peer()) } #[inline] fn client(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.client(), - None => None, - } + self.as_ref().and_then(|m| m.client()) + } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + self.as_mut().and_then(|m| m.get_mut(whatami)) } } diff --git a/commons/zenoh-protocol/src/core/whatami.rs b/commons/zenoh-protocol/src/core/whatami.rs index 10c5b42c78..9eb9628e3f 100644 --- a/commons/zenoh-protocol/src/core/whatami.rs +++ b/commons/zenoh-protocol/src/core/whatami.rs @@ -18,9 +18,10 @@ use const_format::formatcp; use zenoh_result::{bail, ZError}; #[repr(u8)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] pub enum WhatAmI { Router = 0b001, + #[default] Peer = 0b010, Client = 0b100, } diff --git a/examples/src/lib.rs b/examples/src/lib.rs index 1ab27dfc8f..e863e1457c 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -56,10 +56,18 @@ impl From<&CommonArgs> for Config { } .unwrap(); if !value.connect.is_empty() { - config.connect.endpoints = value.connect.iter().map(|v| 
v.parse().unwrap()).collect(); + config + .connect + .endpoints + .set(value.connect.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if !value.listen.is_empty() { - config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect(); + config + .listen + .endpoints + .set(value.listen.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if value.no_multicast_scouting { config.scouting.multicast.set_enabled(Some(false)).unwrap(); diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 5f7d466f13..e3fae4d285 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -110,13 +110,15 @@ fn parse_args() -> Config { config .connect .endpoints - .extend(values.into_iter().map(|v| v.parse().unwrap())) + .set(values.into_iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints - .extend(values.into_iter().map(|v| v.parse().unwrap())) + .set(values.into_iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if args.get_flag("no-multicast-scouting") { config.scouting.multicast.set_enabled(Some(false)).unwrap(); diff --git a/zenoh-ext/examples/src/lib.rs b/zenoh-ext/examples/src/lib.rs index b3e675b046..881d60c138 100644 --- a/zenoh-ext/examples/src/lib.rs +++ b/zenoh-ext/examples/src/lib.rs @@ -50,10 +50,18 @@ impl From<&CommonArgs> for Config { } .unwrap(); if !value.connect.is_empty() { - config.connect.endpoints = value.connect.iter().map(|v| v.parse().unwrap()).collect(); + config + .connect + .endpoints + .set(value.connect.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if !value.listen.is_empty() { - config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect(); + config + .listen + .endpoints + .set(value.listen.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } config 
} diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs index 23e901d458..97dc817394 100644 --- a/zenoh-ext/tests/liveliness.rs +++ b/zenoh-ext/tests/liveliness.rs @@ -37,7 +37,8 @@ async fn test_liveliness_querying_subscriber_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -49,7 +50,8 @@ async fn test_liveliness_querying_subscriber_clique() { let peer2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -107,7 +109,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -119,7 +122,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -131,7 +135,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let client2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ 
-143,7 +148,8 @@ async fn test_liveliness_querying_subscriber_brokered() { let client3 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -201,7 +207,8 @@ async fn test_liveliness_fetching_subscriber_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -213,7 +220,8 @@ async fn test_liveliness_fetching_subscriber_clique() { let peer2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -275,7 +283,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -287,7 +296,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -299,7 +309,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let client2 = { let mut c = config::default(); c.connect - 
.set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -311,7 +322,8 @@ async fn test_liveliness_fetching_subscriber_brokered() { let client3 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index f97e5d7541..2c50560d77 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2830,7 +2830,8 @@ impl crate::net::primitives::EPrimitives for Session { /// /// let mut config = zenoh::config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); +/// config.connect.endpoints.set( +/// ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"].iter().map(|s|s.parse().unwrap()).collect()); /// /// let session = zenoh::open(config).await.unwrap(); /// # } diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index da7739e3be..0bd2b8ef33 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -43,8 +43,6 @@ const RCV_BUF_SIZE: usize = u16::MAX as usize; const SCOUT_INITIAL_PERIOD: Duration = Duration::from_millis(1_000); const SCOUT_MAX_PERIOD: Duration = Duration::from_millis(8_000); const SCOUT_PERIOD_INCREASE_FACTOR: u32 = 2; -const ROUTER_DEFAULT_LISTENER: &str = "tcp/[::]:7447"; -const PEER_DEFAULT_LISTENER: &str = "tcp/[::]:0"; pub enum Loop { Continue, @@ -130,7 +128,12 @@ impl Runtime { let (peers, scouting, addr, ifaces, timeout, multicast_ttl) = { let guard = 
self.state.config.lock(); ( - guard.connect().endpoints().clone(), + guard + .connect() + .endpoints() + .client() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), @@ -168,27 +171,14 @@ impl Runtime { async fn start_peer(&self) -> ZResult<()> { let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay, linkstate) = { let guard = &self.state.config.lock(); - let listeners = if guard.listen().endpoints().is_empty() { - let endpoint: EndPoint = PEER_DEFAULT_LISTENER.parse().unwrap(); - let protocol = endpoint.protocol(); - let mut listeners = vec![]; - if self - .state - .manager - .config - .protocols - .iter() - .any(|p| p.as_str() == protocol.as_str()) - { - listeners.push(endpoint) - } - listeners - } else { - guard.listen().endpoints().clone() - }; ( - listeners, - guard.connect().endpoints().clone(), + guard.listen().endpoints().peer().unwrap_or(&vec![]).clone(), + guard + .connect() + .endpoints() + .peer() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), *unwrap_or_default!(guard.scouting().multicast().listen().peer()), *unwrap_or_default!(guard.scouting().multicast().autoconnect().peer()), @@ -223,27 +213,19 @@ impl Runtime { async fn start_router(&self) -> ZResult<()> { let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { let guard = self.state.config.lock(); - let listeners = if guard.listen().endpoints().is_empty() { - let endpoint: EndPoint = ROUTER_DEFAULT_LISTENER.parse().unwrap(); - let protocol = endpoint.protocol(); - let mut listeners = vec![]; - if self - .state - .manager - .config - .protocols - .iter() - .any(|p| p.as_str() == protocol.as_str()) - { - listeners.push(endpoint) - } - listeners - } else { - guard.listen().endpoints().clone() - }; ( - listeners, - guard.connect().endpoints().clone(), 
+ guard + .listen() + .endpoints() + .router() + .unwrap_or(&vec![]) + .clone(), + guard + .connect() + .endpoints() + .router() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), *unwrap_or_default!(guard.scouting().multicast().listen().router()), *unwrap_or_default!(guard.scouting().multicast().autoconnect().router()), @@ -422,7 +404,16 @@ impl Runtime { } pub(crate) async fn update_peers(&self) -> ZResult<()> { - let peers = { self.state.config.lock().connect().endpoints().clone() }; + let peers = { + self.state + .config + .lock() + .connect() + .endpoints() + .get(self.state.whatami) + .unwrap_or(&vec![]) + .clone() + }; let transports = self.manager().get_transports_unicast().await; if self.state.whatami == WhatAmI::Client { @@ -1163,6 +1154,8 @@ impl Runtime { .lock() .connect() .endpoints() + .get(session.runtime.state.whatami) + .unwrap_or(&vec![]) .clone() }; if peers.contains(endpoint) { diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index bbadd0dcf3..d1790dc009 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -47,7 +47,11 @@ mod test { async fn get_basic_router_config() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs index f8dcf74bc4..39daff0199 100644 --- a/zenoh/tests/authentication.rs +++ b/zenoh/tests/authentication.rs @@ -236,7 +236,11 @@ client2name:client2passwd"; let cert_path = TESTFILES_PATH.to_string_lossy(); let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]; + config + .listen + .endpoints + 
.set(vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .insert_json5( @@ -278,7 +282,11 @@ client2name:client2passwd"; let cert_path = TESTFILES_PATH.to_string_lossy(); let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]; + config + .listen + .endpoints + .set(vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .insert_json5( @@ -327,7 +335,11 @@ client2name:client2passwd"; async fn get_basic_router_config_usrpswd() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:37447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .insert_json5( diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 9bee87f199..78814556f7 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -16,6 +16,7 @@ use zenoh::{ prelude::*, Config, }; +use zenoh_config::ModeDependent; #[test] fn retry_config_overriding() { @@ -74,7 +75,14 @@ fn retry_config_overriding() { }, ]; - for (i, endpoint) in config.listen().endpoints().iter().enumerate() { + for (i, endpoint) in config + .listen() + .endpoints() + .get(config.mode().unwrap_or_default()) + .unwrap_or(&vec![]) + .iter() + .enumerate() + { let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true); assert_eq!(retry_config, expected[i]); } diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 267b30442f..c6931f1c2c 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -20,14 +20,26 @@ const TIMEOUT: Duration = 
Duration::from_secs(10); async fn open_session(listen: &[&str], connect: &[&str]) -> Session { let mut config = config::peer(); - config.listen.endpoints = listen - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); - config.connect.endpoints = connect - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + listen + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); + config + .connect + .endpoints + .set( + connect + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening session"); ztimeout!(zenoh::open(config)).unwrap() diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 7b82b23814..1e5ef13799 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -56,8 +56,16 @@ fn build_config( .set_enabled(Some(false)) .unwrap(); - sub_config.listen.endpoints = vec![locator.parse().unwrap()]; - pub_config.connect.endpoints = vec![locator.parse().unwrap()]; + sub_config + .listen + .endpoints + .set(vec![locator.parse().unwrap()]) + .unwrap(); + pub_config + .connect + .endpoints + .set(vec![locator.parse().unwrap()]) + .unwrap(); match flow { InterceptorFlow::Egress => pub_config.set_downsampling(ds_config).unwrap(), diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index dbd850da24..72dab9bd29 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -32,7 +32,8 @@ async fn test_liveliness_subscriber_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -44,7 +45,8 @@ async fn test_liveliness_subscriber_clique() { let peer2 = { let mut c = config::default(); c.connect - 
.set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -89,7 +91,8 @@ async fn test_liveliness_query_clique() { let peer1 = { let mut c = config::default(); c.listen - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -101,7 +104,8 @@ async fn test_liveliness_query_clique() { let peer2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Peer)); @@ -140,7 +144,8 @@ async fn test_liveliness_subscriber_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -152,7 +157,8 @@ async fn test_liveliness_subscriber_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -164,7 +170,8 @@ async fn test_liveliness_subscriber_brokered() { let client2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -209,7 +216,8 @@ 
async fn test_liveliness_query_brokered() { let _router = { let mut c = config::default(); c.listen - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Router)); @@ -221,7 +229,8 @@ async fn test_liveliness_query_brokered() { let client1 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); @@ -233,7 +242,8 @@ async fn test_liveliness_query_brokered() { let client2 = { let mut c = config::default(); c.connect - .set_endpoints(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) .unwrap(); c.scouting.multicast.set_enabled(Some(false)).unwrap(); let _ = c.set_mode(Some(WhatAmI::Client)); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 13a05a268e..da0ba0a6d1 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -27,7 +27,8 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .listen - .set_endpoints(vec![locator.parse().unwrap()]) + .endpoints + .set(vec![locator.parse().unwrap()]) .unwrap(); config }; diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs index a6336e863a..7f1c2b2972 100644 --- a/zenoh/tests/open_time.rs +++ b/zenoh/tests/open_time.rs @@ -41,7 +41,8 @@ async fn time_open( router_config.set_mode(Some(WhatAmI::Router)).unwrap(); router_config .listen - .set_endpoints(vec![listen_endpoint.clone()]) + .endpoints + .set(vec![listen_endpoint.clone()]) .unwrap(); router_config .transport @@ -70,7 +71,8 @@ async fn time_open( app_config.set_mode(Some(connect_mode)).unwrap(); app_config .connect 
- .set_endpoints(vec![connect_endpoint.clone()]) + .endpoints + .set(vec![connect_endpoint.clone()]) .unwrap(); app_config .transport diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 2256455be5..fd680ae545 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -268,16 +268,20 @@ impl Recipe { let mut config = node.config.unwrap_or_default(); config.set_mode(Some(node.mode)).unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .listen - .set_endpoints(node.listen.iter().map(|x| x.parse().unwrap()).collect()) - .unwrap(); - config - .connect - .set_endpoints( - node.connect.iter().map(|x| x.parse().unwrap()).collect(), - ) - .unwrap(); + if !node.listen.is_empty() { + config + .listen + .endpoints + .set(node.listen.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + } + if !node.connect.is_empty() { + config + .connect + .endpoints + .set(node.connect.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + } config }; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 859ff43f7d..916b0c4fb3 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -36,19 +36,31 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) 
+ .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); let peer02 = ztimeout!(zenoh::open(config)).unwrap(); @@ -59,13 +71,21 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint01.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint02.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); let peer02 = ztimeout!(zenoh::open(config)).unwrap(); @@ -266,20 +286,32 @@ async fn zenoh_session_multicast() { async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Creating r1 session runtime: {:?}", endpoints); let mut r1 = RuntimeBuilder::new(config).build().await.unwrap(); r1.start().await.unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + 
.unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Creating r2 session runtime: {:?}", endpoints); let mut r2 = RuntimeBuilder::new(config).build().await.unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 33665913ed..e47de65812 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -41,20 +41,32 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); @@ -66,14 +78,22 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint01.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {}", 
endpoint01); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint02.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index a89ddb4b04..49663249ad 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -38,23 +38,39 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_p2p_sessions() -> (Session, Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening s01 session"); let s01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec!["tcp/127.0.0.1:27448".parse().unwrap()]; - config.connect.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27448".parse().unwrap()]) + .unwrap(); + config + .connect + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening s02 session"); let s02 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = vec![ - "tcp/127.0.0.1:27447".parse().unwrap(), - "tcp/127.0.0.1:27448".parse().unwrap(), - ]; + config + .connect + .endpoints + .set(vec![ + "tcp/127.0.0.1:27447".parse().unwrap(), + "tcp/127.0.0.1:27448".parse().unwrap(), + ]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); 
println!("[ ][03a] Opening s03 session"); let s03 = ztimeout!(zenoh::open(config)).unwrap(); @@ -66,7 +82,11 @@ async fn open_router_session() -> Session { // Open the sessions let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:37447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][00a] Opening router session"); ztimeout!(zenoh::open(config)).unwrap() diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index e69dd3d263..71fa0bce34 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -37,8 +37,6 @@ lazy_static::lazy_static!( static ref LONG_VERSION: String = format!("{} built with {}", GIT_VERSION, env!("RUSTC_VERSION")); ); -const DEFAULT_LISTENER: &str = "tcp/[::]:7447"; - #[derive(Debug, Parser)] #[command(version=GIT_VERSION, long_version=LONG_VERSION.as_str(), about="The zenoh router")] struct Args { @@ -168,7 +166,8 @@ fn config_from_args(args: &Args) -> Config { if !args.connect.is_empty() { config .connect - .set_endpoints( + .endpoints + .set( args.connect .iter() .map(|v| match v.parse::() { @@ -184,7 +183,8 @@ fn config_from_args(args: &Args) -> Config { if !args.listen.is_empty() { config .listen - .set_endpoints( + .endpoints + .set( args.listen .iter() .map(|v| match v.parse::() { @@ -197,12 +197,6 @@ fn config_from_args(args: &Args) -> Config { ) .unwrap(); } - if config.listen.endpoints.is_empty() { - config - .listen - .endpoints - .push(DEFAULT_LISTENER.parse().unwrap()) - } if args.no_timestamp { config .timestamping From 54d12e1b57b1aee2f65c92d43408e930f76e4690 Mon Sep 17 00:00:00 2001 From: Julien Enoch Date: Fri, 5 Jul 2024 09:49:04 +0200 Subject: [PATCH 515/598] Bump uhlc to 0.8.0 - changing formatting of Timestamps as Strings (#1218) * Bump uhlc to 0.8.0 * Remove irrelevant tests in storages replication * Convert 
Timestamps in some unit tests * Make cargo fmt happy --- Cargo.lock | 4 +- Cargo.toml | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 6 +- .../src/replica/digest.rs | 381 ------------------ .../tests/operations.rs | 9 +- .../tests/wildcard.rs | 15 +- 6 files changed, 14 insertions(+), 403 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5344e0135c..3438a2d2c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4715,9 +4715,9 @@ checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uhlc" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b6df3f3e948b40e20c38a6d1fd6d8f91b3573922fc164e068ad3331560487e" +checksum = "79ac3c37bd9506595768f0387bd39d644525728b4a1d783218acabfb56356db7" dependencies = [ "humantime", "lazy_static", diff --git a/Cargo.toml b/Cargo.toml index a1820cb495..ce8ae1643d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -171,7 +171,7 @@ tokio-rustls = { version = "0.26.0", default-features = false } # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) thread-priority = "1.1.0" typenum = "1.16.0" -uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates +uhlc = { version = "0.8.0", default-features = false } # Default features are disabled due to usage in no_std crates unwrap-infallible = "0.1.5" unzip-n = "0.1.2" url = "2.3.1" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e3dcc0130e..d6db7c74cb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -56,7 +56,7 @@ struct JSONSample { key: String, value: serde_json::Value, encoding: String, - time: Option, + timestamp: Option, } pub fn base64_encode(data: &[u8]) -> String { @@ -100,7 +100,7 @@ fn sample_to_json(sample: &Sample) -> JSONSample { key: 
sample.key_expr().as_str().to_string(), value: payload_to_json(sample.payload(), sample.encoding()), encoding: sample.encoding().to_string(), - time: sample.timestamp().map(|ts| ts.to_string()), + timestamp: sample.timestamp().map(|ts| ts.to_string()), } } @@ -111,7 +111,7 @@ fn result_to_json(sample: Result<&Sample, &ReplyError>) -> JSONSample { key: "ERROR".into(), value: payload_to_json(err.payload(), err.encoding()), encoding: err.encoding().to_string(), - time: None, + timestamp: None, }, } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index bf06c61f25..07ba7e9ea3 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -833,384 +833,3 @@ impl Digest { } } } - -#[test] -fn test_create_digest_empty_initial() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - Vec::new(), - 1671612730, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_hot() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: 
Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634800, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Hot, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_warm() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634810, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Warm, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - 
checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_cold() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634910, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_digest_add_content() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::update_digest( - 
Digest { - timestamp: Timestamp::from_str("2022-12-21T13:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }, - 1671634910, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - HashSet::new(), - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_digest_remove_content() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::update_digest( - Digest { - timestamp: Timestamp::from_str("2022-12-21T13:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 3304302629246049840, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 8238986480495191270, - content: BTreeSet::from([1671634800]), - }, - 
)]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 12344398372324783476, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10007212639402189432, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }, - 1671634910, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::new(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_remove_digest() { - async_std::task::block_on(async { - zenoh::internal::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - Vec::new(), - 1671612730, - ); - let added = Digest::update_digest( - created.clone(), - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - HashSet::new(), - ); - assert_ne!(created, added); - - let removed = Digest::update_digest( - added.clone(), - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::new(), - HashSet::from([LogEntry { - 
timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - ); - assert_eq!(created, removed); - - let added_again = Digest::update_digest( - removed, - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - HashSet::new(), - ); - assert_eq!(added, added_again); -} diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 505634e6fb..c1ed09b1a7 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -86,8 +86,7 @@ async fn test_updates_in_order() { &session, "operation/test/a", "1", - Timestamp::from_str("2022-01-17T10:42:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123566570568799/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -102,8 +101,7 @@ async fn test_updates_in_order() { &session, "operation/test/b", "2", - Timestamp::from_str("2022-01-17T10:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123824268606559/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -117,8 +115,7 @@ async fn test_updates_in_order() { delete_data( &session, "operation/test/a", - Timestamp::from_str("2022-01-17T10:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123824268606559/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 04e4549508..d6e94ecb1f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -87,8 +87,7 @@ 
async fn test_wild_card_in_order() { &session, "wild/test/*", "1", - Timestamp::from_str("2022-01-17T10:42:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123566570568799/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -102,8 +101,7 @@ async fn test_wild_card_in_order() { &session, "wild/test/a", "2", - Timestamp::from_str("2022-01-17T10:42:11.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123570865536095/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -119,8 +117,7 @@ async fn test_wild_card_in_order() { &session, "wild/test/b", "3", - Timestamp::from_str("2022-01-17T10:42:11.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123570865536095/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -150,8 +147,7 @@ async fn test_wild_card_in_order() { &session, "wild/test/*", "4", - Timestamp::from_str("2022-01-17T10:43:12.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123832858541151/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -168,8 +164,7 @@ async fn test_wild_card_in_order() { delete_data( &session, "wild/test/*", - Timestamp::from_str("2022-01-17T13:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054170209915403359/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; From bf94b9bb137557c722382cb7ff329ed89111018f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Jul 2024 10:50:19 +0200 Subject: [PATCH 516/598] Add establishment trace logs --- .../src/unicast/establishment/accept.rs | 31 ++++++++++++++++--- .../src/unicast/establishment/open.rs | 12 +++++-- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 3f71d7b6da..64949357c6 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs 
+++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -163,6 +163,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!( + "Establishment Accept InitSyn: {}. Received: {:?}", + self.link, + msg + ); + let init_syn = match msg.body { TransportBody::InitSyn(init_syn) => init_syn, _ => { @@ -362,7 +368,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { let cookie: ZSlice = encrypted.into(); // Send the message on the link - let message: TransportMessage = InitAck { + let msg: TransportMessage = InitAck { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, @@ -381,10 +387,16 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { let _ = self .link - .send(&message) + .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; + tracing::trace!( + "Establishment Accept InitAck: {}. Sent: {:?}", + self.link, + msg + ); + let output = SendInitAckOut { cookie_nonce, #[cfg(feature = "shared-memory")] @@ -405,6 +417,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!( + "Establishment Accept OpenSyn: {}. Received: {:?}", + self.link, + msg + ); + let open_syn = match msg.body { TransportBody::OpenSyn(open_syn) => open_syn, TransportBody::Close(Close { reason, .. }) => { @@ -594,7 +612,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Build OpenAck message let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); - let open_ack = OpenAck { + let msg = OpenAck { lease: input.mine_lease, initial_sn: mine_initial_sn, ext_qos, @@ -607,8 +625,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { }; // Do not send the OpenAck right now since we might still incur in MAX_LINKS error + tracing::trace!( + "Establishment Accept OpenAck: {}. 
Sent: {:?}", + self.link, + msg + ); - let output = SendOpenAckOut { open_ack }; + let output = SendOpenAckOut { open_ack: msg }; Ok(output) } } diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 9f6f2e61a7..a9e797228e 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -208,6 +208,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { } .into(); + tracing::trace!("Establishment Open InitSyn: {}. Sent: {:?}", link, msg); + let _ = link .send(&msg) .await @@ -229,6 +231,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!("Establishment Open InitAck: {}. Received: {:?}", link, msg); + let init_ack = match msg.body { TransportBody::InitAck(init_ack) => init_ack, TransportBody::Close(Close { reason, .. }) => { @@ -414,7 +418,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Build and send an OpenSyn message let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); - let message: TransportMessage = OpenSyn { + let msg: TransportMessage = OpenSyn { lease: input.mine_lease, initial_sn: mine_initial_sn, cookie: input.other_cookie, @@ -429,10 +433,12 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .into(); let _ = link - .send(&message) + .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; + tracing::trace!("Establishment Open OpenSyn: {}. Sent: {:?}", link, msg); + let output = SendOpenSynOut { mine_initial_sn, #[cfg(feature = "shared-memory")] @@ -454,6 +460,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!("Establishment Open OpenAck: {}. Received: {:?}", link, msg); + let open_ack = match msg.body { TransportBody::OpenAck(open_ack) => open_ack, TransportBody::Close(Close { reason, .. 
}) => { From c7e418f526277c3aa8f7810a006264208659772e Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Fri, 5 Jul 2024 11:22:26 +0200 Subject: [PATCH 517/598] fix: typos (#1220) * zenoh/src/api/handlers/ring.rs: synchrounous -> synchronous * zenoh/src/api/selector.rs: intendend -> intended Signed-off-by: Julien Loudet --- zenoh/src/api/handlers/ring.rs | 2 +- zenoh/src/api/selector.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 18ca6f495c..7b058d1905 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -24,7 +24,7 @@ use zenoh_result::ZResult; use super::{callback::Callback, Dyn, IntoHandler}; use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; -/// A synchrounous ring channel with a limited size that allows users to keep the last N data. +/// A synchronous ring channel with a limited size that allows users to keep the last N data. pub struct RingChannel { capacity: usize, } diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 813ae0528d..d7b7466be2 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -26,7 +26,7 @@ use super::{key_expr::KeyExpr, queryable::Query}; /// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters -/// with a few intendend uses: +/// with a few intended uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters /// - filtering by value, /// - filtering by metadata, such as the timestamp of a value, From 47f5347c0aff93e61bdaa28b1c89425f04260ec3 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Jul 2024 12:31:56 +0200 Subject: [PATCH 518/598] Compute TCP/TLS MTU upon link creation --- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 55 ++++++++++---------- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 54 
+++++++++---------- 2 files changed, 53 insertions(+), 56 deletions(-) diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 99d2d44c36..7532055f8e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -43,6 +43,8 @@ pub struct LinkUnicastTcp { // The destination socket address of this link (address used on the remote host) dst_addr: SocketAddr, dst_locator: Locator, + // The computed mtu + mtu: BatchSize, } unsafe impl Sync for LinkUnicastTcp {} @@ -71,6 +73,29 @@ impl LinkUnicastTcp { ); } + // Compute the MTU + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TCP_DEFAULT_MTU - header; + + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] + { + let socket = socket2::SockRef::from(&socket); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; + } + // Build the Tcp object LinkUnicastTcp { socket: UnsafeCell::new(socket), @@ -78,14 +103,10 @@ impl LinkUnicastTcp { src_locator: Locator::new(TCP_LOCATOR_PREFIX, src_addr.to_string(), "").unwrap(), dst_addr, dst_locator: Locator::new(TCP_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), + mtu, } } - #[cfg(target_family = "unix")] - fn get_socket(&self) -> &TcpStream { - unsafe { &*self.socket.get() } - } - #[allow(clippy::mut_from_ref)] fn get_mut_socket(&self) -> &mut TcpStream { unsafe { &mut *self.socket.get() } @@ -153,29 +174,7 @@ impl 
LinkUnicastTrait for LinkUnicastTcp { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - #[allow(unused_mut)] // mut is not needed when target_family != unix - let mut mtu = *TCP_DEFAULT_MTU - header; - - // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(target_family = "unix")] - { - let socket = socket2::SockRef::from(self.get_socket()); - // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS - let mss = socket.mss().unwrap_or(mtu as u32) / 2; - // Compute largest multiple of TCP MSS that is smaller of default MTU - let mut tgt = mss; - while (tgt + mss) < mtu as u32 { - tgt += mss; - } - mtu = (mtu as u32).min(tgt) as BatchSize; - } - - mtu + self.mtu } #[inline(always)] diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 4ab21d9993..716eac2121 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -61,6 +61,7 @@ pub struct LinkUnicastTls { write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, auth_identifier: LinkAuthId, + mtu: BatchSize, } unsafe impl Send for LinkUnicastTls {} @@ -96,6 +97,29 @@ impl LinkUnicastTls { ); } + // Compute the MTU + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TLS_DEFAULT_MTU - header; + + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] + { + let socket = socket2::SockRef::from(&tcp_stream); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + 
let mss = socket.mss().unwrap_or(mtu as u32) / 2; + // Compute largest multiple of TCP MSS that is smaller of default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; + } + // Build the Tls object LinkUnicastTls { inner: UnsafeCell::new(socket), @@ -106,14 +130,10 @@ impl LinkUnicastTls { write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), auth_identifier, + mtu, } } - #[cfg(target_family = "unix")] - fn get_socket(&self) -> &TlsStream { - unsafe { &*self.inner.get() } - } - // NOTE: It is safe to suppress Clippy warning since no concurrent reads // or concurrent writes will ever happen. The read_mtx and write_mtx // are respectively acquired in any read and write operation. @@ -188,29 +208,7 @@ impl LinkUnicastTrait for LinkUnicastTls { #[inline(always)] fn get_mtu(&self) -> BatchSize { - // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ - let header = match self.src_addr.ip() { - std::net::IpAddr::V4(_) => 40, - std::net::IpAddr::V6(_) => 60, - }; - #[allow(unused_mut)] // mut is not needed when target_family != unix - let mut mtu = *TLS_DEFAULT_MTU - header; - - // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 - #[cfg(target_family = "unix")] - { - let socket = socket2::SockRef::from(self.get_socket().get_ref().0); - // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS - let mss = socket.mss().unwrap_or(mtu as u32) / 2; - // Compute largest multiple of TCP MSS that is smaller of default MTU - let mut tgt = mss; - while (tgt + mss) < mtu as u32 { - tgt += mss; - } - mtu = (mtu as u32).min(tgt) as BatchSize; - } - - mtu + self.mtu } #[inline(always)] From 2b2064cf180c86d8869a55fa49ce073ae880588c Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Fri, 5 Jul 2024 11:22:26 +0200 Subject: [PATCH 519/598] fix: typos (#1220) * zenoh/src/api/handlers/ring.rs: synchrounous -> synchronous * 
zenoh/src/api/selector.rs: intendend -> intended Signed-off-by: Julien Loudet --- zenoh/src/api/handlers/ring.rs | 2 +- zenoh/src/api/selector.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 18ca6f495c..7b058d1905 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -24,7 +24,7 @@ use zenoh_result::ZResult; use super::{callback::Callback, Dyn, IntoHandler}; use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; -/// A synchrounous ring channel with a limited size that allows users to keep the last N data. +/// A synchronous ring channel with a limited size that allows users to keep the last N data. pub struct RingChannel { capacity: usize, } diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 813ae0528d..d7b7466be2 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -26,7 +26,7 @@ use super::{key_expr::KeyExpr, queryable::Query}; /// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters -/// with a few intendend uses: +/// with a few intended uses: /// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters /// - filtering by value, /// - filtering by metadata, such as the timestamp of a value, From 55557f943d97dc3db8b428cae930e98b163f988d Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Fri, 5 Jul 2024 13:59:59 +0000 Subject: [PATCH 520/598] =?UTF-8?q?remove=20`new=5Ftimestamp`=20fn=20,=20`?= =?UTF-8?q?time`=20module,=20reworked=20plugin=20storage=20=E2=80=A6=20(#1?= =?UTF-8?q?188)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: ChenYing Kuo Co-authored-by: Luca Cominardi Co-authored-by: ChenYing Kuo (CY) --- .../src/replica/mod.rs | 6 ++--- .../src/replica/snapshotter.rs | 22 
+++++++++-------- .../src/replica/storage.rs | 7 ++---- zenoh-ext/src/querying_subscriber.rs | 14 +++++++---- zenoh/src/api/mod.rs | 1 - zenoh/src/api/time.rs | 24 ------------------- zenoh/src/lib.rs | 2 -- 7 files changed, 27 insertions(+), 49 deletions(-) delete mode 100644 zenoh/src/api/time.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 114e5c206b..014fdc697e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -112,11 +112,10 @@ impl Replica { }; // Zid of session for generating timestamps - let zid = session.zid(); let replica = Replica { name: name.to_string(), - session, + session: session.clone(), key_expr: storage_config.key_expr.clone(), replica_config: storage_config.replica_config.clone().unwrap(), digests_published: RwLock::new(HashSet::new()), @@ -131,7 +130,8 @@ impl Replica { let config = replica.replica_config.clone(); // snapshotter - let snapshotter = Arc::new(Snapshotter::new(zid, rx_log, &startup_entries, &config).await); + let snapshotter = + Arc::new(Snapshotter::new(session, rx_log, &startup_entries, &config).await); // digest sub let digest_sub = replica.start_digest_sub(tx_digest).fuse(); // queryable for alignment diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index 6bb2cf113b..190cf6005b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -24,14 +24,14 @@ use async_std::{ }; use flume::Receiver; use futures::join; -use zenoh::{key_expr::OwnedKeyExpr, session::ZenohId, time::Timestamp}; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, Session}; use zenoh_backend_traits::config::ReplicaConfig; use super::{Digest, DigestConfig, LogEntry}; pub struct Snapshotter { - // session 
id for timestamp generation - id: ZenohId, + // session ref for timestamp generation + session: Arc, // channel to get updates from the storage storage_update: Receiver<(OwnedKeyExpr, Timestamp)>, // configuration parameters of the replica @@ -57,7 +57,7 @@ pub struct ReplicationInfo { impl Snapshotter { // Initialize the snapshot parameters, logs and digest pub async fn new( - id: ZenohId, + session: Arc, rx_sample: Receiver<(OwnedKeyExpr, Timestamp)>, initial_entries: &Vec<(OwnedKeyExpr, Timestamp)>, replica_config: &ReplicaConfig, @@ -66,12 +66,12 @@ impl Snapshotter { // from initial entries, populate the log - stable and volatile // compute digest let (last_snapshot_time, last_interval) = Snapshotter::compute_snapshot_params( - id, + session.clone(), replica_config.propagation_delay, replica_config.delta, ); let snapshotter = Snapshotter { - id, + session, storage_update: rx_sample, replica_config: replica_config.clone(), content: ReplicationInfo { @@ -131,7 +131,7 @@ impl Snapshotter { let mut last_snapshot_time = self.content.last_snapshot_time.write().await; let mut last_interval = self.content.last_interval.write().await; let (time, interval) = Snapshotter::compute_snapshot_params( - self.id, + self.session.clone(), self.replica_config.propagation_delay, self.replica_config.delta, ); @@ -143,13 +143,15 @@ impl Snapshotter { } } + // TODO // Compute latest snapshot time and latest interval with respect to the current time pub fn compute_snapshot_params( - id: ZenohId, + session: Arc, propagation_delay: Duration, delta: Duration, ) -> (Timestamp, u64) { - let now = zenoh::time::new_timestamp(id); + let now = session.new_timestamp(); + let latest_interval = (now .get_time() .to_system_time() @@ -206,7 +208,7 @@ impl Snapshotter { // Create digest from the stable log at startup async fn initialize_digest(&self) { - let now = zenoh::time::new_timestamp(self.id); + let now = self.session.new_timestamp(); let replica_data = &self.content; let log_locked = 
replica_data.stable_log.read().await; let latest_interval = replica_data.last_interval.read().await; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f926417743..17be005f08 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -37,7 +37,7 @@ use zenoh::{ query::{ConsolidationMode, QueryTarget}, sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, session::{Session, SessionDeclarations}, - time::{new_timestamp, Timestamp, NTP64}, + time::{Timestamp, NTP64}, Result as ZResult, }; use zenoh_backend_traits::{ @@ -148,9 +148,6 @@ impl StorageService { ); t.add_async(gc).await; - // get session id for timestamp generation - let zid = self.session.info().zid().await; - // subscribe on key_expr let storage_sub = match self.session.declare_subscriber(&self.key_expr).await { Ok(storage_sub) => storage_sub, @@ -240,7 +237,7 @@ impl StorageService { continue; } }; - let timestamp = sample.timestamp().cloned().unwrap_or(new_timestamp(zid)); + let timestamp = sample.timestamp().cloned().unwrap_or(self.session.new_timestamp()); let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); self.process_sample(sample).await; }, diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index baf486601d..6134e4d2d7 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,7 +17,7 @@ use std::{ future::{IntoFuture, Ready}, mem::swap, sync::{Arc, Mutex}, - time::Duration, + time::{Duration, SystemTime, UNIX_EPOCH}, }; use zenoh::{ @@ -29,7 +29,7 @@ use zenoh::{ query::{QueryConsolidation, QueryTarget, ReplyKeyExpr, Selector}, sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, session::{SessionDeclarations, SessionRef}, - time::{new_timestamp, Timestamp}, + time::Timestamp, Error, Resolvable, Resolve, 
Result as ZResult, }; @@ -654,7 +654,8 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { InputHandler: IntoHandler<'static, Sample, Handler = Handler> + Send, TryIntoSample: ExtractSample + Send + Sync, { - let zid = conf.session.zid(); + let session_id = conf.session.zid(); + let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), @@ -672,9 +673,14 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { tracing::trace!( "Sample received while fetch in progress: push it to merge_queue" ); + // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - let timestamp = s.timestamp().cloned().unwrap_or(new_timestamp(zid)); + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here + let timestamp = s + .timestamp() + .cloned() + .unwrap_or(Timestamp::new(now, session_id.into())); state .merge_queue .push(SampleBuilder::from(s).timestamp(timestamp).into()); diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index 91ae6bed67..d3053cb3c9 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -35,5 +35,4 @@ pub(crate) mod scouting; pub(crate) mod selector; pub(crate) mod session; pub(crate) mod subscriber; -pub(crate) mod time; pub(crate) mod value; diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs deleted file mode 100644 index 1879143389..0000000000 --- a/zenoh/src/api/time.rs +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::time::{SystemTime, UNIX_EPOCH}; - -use zenoh_protocol::core::{Timestamp, TimestampId}; - -// TODO: Shall we remove this new_timestamp in favoir of the src/api/session::Session::new_timestamp(); -/// Generates a [`Timestamp`] with [`TimestampId`] and current system time -/// The [`TimestampId`] can be taken from session id returned by [`SessionInfo::zid()`](crate::api::info::SessionInfo::zid). -pub fn new_timestamp>(id: T) -> Timestamp { - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); - Timestamp::new(now, id.into()) -} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 77db49f525..024c1303af 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -353,8 +353,6 @@ pub mod liveliness { /// Timestamp support pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; - - pub use crate::api::time::new_timestamp; } /// Configuration to pass to [`open`] and [`scout`] functions and associated constants From b2df2b711e12a20e3d0950bc4af019a4893842d6 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Jul 2024 17:05:33 +0200 Subject: [PATCH 521/598] Make adminspace return current metadata (#1221) --- zenoh/src/net/runtime/adminspace.rs | 5 +---- zenoh/src/net/runtime/mod.rs | 3 --- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 26807e8907..e2dad5c844 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -57,7 +57,6 @@ use crate::{ pub struct AdminContext { runtime: Runtime, version: String, - metadata: serde_json::Value, } type Handler = Arc; @@ -153,7 +152,6 @@ impl AdminSpace { let zid_str = runtime.state.zid.to_string(); let whatami_str = runtime.state.whatami.to_str(); let mut config = runtime.config().lock(); - let metadata = runtime.state.metadata.clone(); let 
root_key: OwnedKeyExpr = format!("@/{whatami_str}/{zid_str}").try_into().unwrap(); let mut handlers: HashMap<_, Handler> = HashMap::new(); @@ -221,7 +219,6 @@ impl AdminSpace { let context = Arc::new(AdminContext { runtime: runtime.clone(), version, - metadata, }); let admin = Arc::new(AdminSpace { zid: runtime.zid(), @@ -601,7 +598,7 @@ fn local_data(context: &AdminContext, query: Query) { let mut json = json!({ "zid": context.runtime.state.zid, "version": context.version, - "metadata": context.metadata, + "metadata": context.runtime.config().lock().metadata(), "locators": locators, "sessions": transports, "plugins": plugins, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 4f3c6974f7..b7ba0d11da 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -70,7 +70,6 @@ pub(crate) struct RuntimeState { zid: ZenohId, whatami: WhatAmI, next_id: AtomicU32, - metadata: serde_json::Value, router: Arc, config: Notifier, manager: TransportManager, @@ -138,7 +137,6 @@ impl RuntimeBuilder { tracing::info!("Using ZID: {}", zid); let whatami = unwrap_or_default!(config.mode()); - let metadata = config.metadata().clone(); let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); @@ -179,7 +177,6 @@ impl RuntimeBuilder { zid: zid.into(), whatami, next_id: AtomicU32::new(1), // 0 is reserved for routing core - metadata, router, config: config.clone(), manager: transport_manager, From cae8697e7da3f5d3ec75d0dd92d6180196b00efc Mon Sep 17 00:00:00 2001 From: Charles Schleich Date: Fri, 5 Jul 2024 15:09:30 +0000 Subject: [PATCH 522/598] simplify timestamp id, remove allocation. 
(#1223) --- zenoh/src/api/session.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 2c50560d77..f5890edc3a 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -675,9 +675,15 @@ impl Session { /// # } /// ``` pub fn new_timestamp(&self) -> Timestamp { - let id = self.runtime.zid(); - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here - Timestamp::new(now, id.into()) + match self.hlc() { + Some(hlc) => hlc.new_timestamp(), + None => { + // Called in the case that the runtime is not initialized with an hlc + // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); + Timestamp::new(now, self.runtime.zid().into()) + } + } } } From 418b5a628c7587815eae593a8df32aa5e9f45983 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 5 Jul 2024 22:03:38 +0200 Subject: [PATCH 523/598] Fix typos ("nof" -> "not") (#1227) --- zenoh/src/net/routing/dispatcher/queries.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 6ce9046a4a..445f138d8d 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -722,7 +722,7 @@ pub(crate) fn route_send_response( )); } None => tracing::warn!( - "Route reply {}:{} from {}: Query nof found!", + "Route reply {}:{} from {}: Query not found!", face, qid, face @@ -748,7 +748,7 @@ pub(crate) fn route_send_response_final( finalize_pending_query(query); } None => tracing::warn!( - "Route final reply {}:{} from {}: Query nof found!", + "Route final reply {}:{} from {}: Query not found!", face, qid, face From 6df74c7bebe216ebfe792972052cfc16f98b0e99 Mon Sep 17 00:00:00 2001 From: J-Loudet 
Date: Sun, 7 Jul 2024 10:40:31 +0200 Subject: [PATCH 524/598] fix(storage-manager): do not start when 'timestamping' is disabled (#1219) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All storage must have a timestamp associated with a Sample. As it is possible to publish without adding a timestamp, it means that a Zenoh node must add this timestamp "at some point". Up until now, the default configuration of a router ('timestamping' enabled) combined with the fact that only routers could load plugins (and, thus, storage) made it so that a timestamp was (by default) always added. Recent changes in Zenoh — namely the fact that not only routers can load plugins and that peers and client have, by default, the 'timestamping' configuration disabled — invalidate these assumptions. We should then enforce at runtime, that the 'timestamping' configuration is enabled when attempting to load the storage manager. This commit adds this check by verifying that there is an HLC associated with the Zenoh Session — the HLC is only created if 'timestamping' is enabled (see `zenoh/zenoh/src/net/runtime/mod.rs::142`). * plugins/zenoh-plugin-storage-manager/src/lib.rs: return an error if the storage manager is started while the configuration option 'timestamping' is disabled. * plugins/zenoh-plugin-storage-manager/tests/operations.rs: updated the `config` used in the test to enable 'timestamping'. * plugins/zenoh-plugin-storage-manager/tests/wildcard.rs: updated the `config` used in the test to enable 'timestamping'. 
Signed-off-by: Julien Loudet --- plugins/zenoh-plugin-storage-manager/src/lib.rs | 17 +++++++++++++++++ .../tests/operations.rs | 12 ++++++++++++ .../tests/wildcard.rs | 12 ++++++++++++ 3 files changed, 41 insertions(+) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index c916b649d9..7399d3e507 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -31,6 +31,7 @@ use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; use zenoh::{ internal::{ + bail, plugins::{Response, RunningPlugin, RunningPluginTrait, ZenohPlugin}, runtime::Runtime, zlock, LibLoader, @@ -120,6 +121,22 @@ impl StorageRuntimeInner { let session = Arc::new(zenoh::session::init(runtime.clone()).wait()?); + // NOTE: All storage **must** have a timestamp associated with a Sample. Considering that it is possible to make + // a publication without associating a timestamp, that means that the node managing the storage (be it a + // Zenoh client / peer / router) has to add it. + // + // If the `timestamping` configuration setting is disabled then there is no HLC associated with the + // Session. That eventually means that no timestamp can be generated which goes against the previous + // requirement. + // + // Hence, in that scenario, we refuse to start the storage manager and any storage. + if session.hlc().is_none() { + tracing::error!( + "Cannot start storage manager (and thus any storage) without the 'timestamping' setting enabled in the Zenoh configuration" + ); + bail!("Cannot start storage manager, 'timestamping' is disabled in the configuration"); + } + // After this moment result should be only Ok. Failure of loading of one voulme or storage should not affect others. 
let mut new_self = StorageRuntimeInner { diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index c1ed09b1a7..d8ada83e4c 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -70,6 +70,18 @@ async fn test_updates_in_order() { }"#, ) .unwrap(); + config + .insert_json5( + "timestamping", + r#"{ + enabled: { + router: true, + peer: true, + client: true + } + }"#, + ) + .unwrap(); let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index d6e94ecb1f..d1633a28d4 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -71,6 +71,18 @@ async fn test_wild_card_in_order() { }"#, ) .unwrap(); + config + .insert_json5( + "timestamping", + r#"{ + enabled: { + router: true, + peer: true, + client: true + } + }"#, + ) + .unwrap(); let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() From 12b11ee6f2f1a9390dccfeb3633be2f88b568a50 Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Mon, 8 Jul 2024 15:08:09 +0200 Subject: [PATCH 525/598] fix(storage-manager): validate presence of timestamp (#1229) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit introduces checks before accessing the `timestamp` associated with a Sample — instead of calling `unwrap()`. In theory, a Sample should never arrive to a Storage without a Timestamp. In practice, we cannot guarantee this invariant with certainty (future modifications of the code base?). With these checks, the Storage will simply discard the Sample instead of panicking the entire storage manager. 
* plugins/zenoh-plugin-storage-manager/src/replica/storage.rs: add checks when accessing the timestamp and remove `unwrap`. Signed-off-by: Julien Loudet --- .../src/replica/storage.rs | 41 +++++++++++++------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 17be005f08..d12b51042c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -271,6 +271,17 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { tracing::trace!("[STORAGE] Processing sample: {:?}", sample); + + // A Sample, in theory, will not arrive to a Storage without a Timestamp. This check (which, again, should + // never enter the `None` branch) ensures that the Storage Manager does not panic even if it ever happens. + let sample_timestamp = match sample.timestamp() { + Some(timestamp) => timestamp, + None => { + tracing::error!("Discarding Sample that has no Timestamp: {:?}", sample); + return; + } + }; + // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -288,12 +299,10 @@ impl StorageService { ); for k in matching_keys { - if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) - .await + if !self.is_deleted(&k.clone(), sample_timestamp).await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, sample_timestamp).await)) { tracing::trace!( "Sample `{:?}` identified as needed processing for key {}", @@ -302,9 +311,8 @@ impl StorageService { ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. 
// get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = if let Some(update) = self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) - .await + let sample_to_store: Sample = if let Some(update) = + self.ovderriding_wild_update(&k, sample_timestamp).await { match update.kind { SampleKind::Put => { @@ -323,6 +331,16 @@ impl StorageService { .into() }; + // A Sample that is to be stored **must** have a Timestamp. In theory, the Sample generated should have + // a Timestamp and, in theory, this check is unneeded. + let sample_to_store_timestamp = match sample_to_store.timestamp() { + Some(timestamp) => *timestamp, + None => { + tracing::error!("Discarding `Sample` generated through `SampleBuilder` that has no Timestamp: {:?}", sample_to_store); + continue; + } + }; + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { @@ -340,16 +358,15 @@ impl StorageService { sample_to_store.payload().clone(), sample_to_store.encoding().clone(), ), - *sample_to_store.timestamp().unwrap(), + sample_to_store_timestamp, ) .await } SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; + self.mark_tombstone(&k, sample_to_store_timestamp).await; storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) + .delete(stripped_key, sample_to_store_timestamp) .await } }; @@ -363,7 +380,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), sample_to_store_timestamp)); match sending { Ok(_) => (), Err(e) => { From 9e1c4a8680a04b34f09dd2776d07958c8aae6f92 Mon Sep 17 00:00:00 2001 From: Diogo Matsubara Date: Mon, 8 Jul 2024 17:55:56 +0200 Subject: [PATCH 526/598] fix: Update zenoh-macros category (#1232) --- commons/zenoh-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/commons/zenoh-macros/Cargo.toml b/commons/zenoh-macros/Cargo.toml index 7d06482e48..6adfe63deb 100644 --- a/commons/zenoh-macros/Cargo.toml +++ b/commons/zenoh-macros/Cargo.toml @@ -20,7 +20,7 @@ homepage = { workspace = true } authors = { workspace = true } edition = { workspace = true } license = { workspace = true } -categories = ["proc-macros"] +categories = ["development-tools::procedural-macro-helpers"] description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From f6415057f375e868b67cdbb0330281ce1f47a6af Mon Sep 17 00:00:00 2001 From: Alexander Date: Fri, 12 Jul 2024 08:42:16 +0200 Subject: [PATCH 527/598] Fix downsampling with wildcards (#1240) --- zenoh/src/net/routing/interceptor/downsampling.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index 1b31040d3c..c8881341e0 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -126,11 +126,12 @@ pub(crate) struct DownsamplingInterceptor { impl InterceptorTrait for DownsamplingInterceptor { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { let ke_id = zlock!(self.ke_id); - if let Some(id) = ke_id.weight_at(&key_expr.clone()) { - Some(Box::new(Some(*id))) - } else { - Some(Box::new(None::)) + if let Some(node) = ke_id.intersecting_keys(key_expr).next() { + if let Some(id) = ke_id.weight_at(&node) { + return Some(Box::new(Some(*id))); + } } + Some(Box::new(None::)) } fn intercept( @@ -188,6 +189,11 @@ impl DownsamplingInterceptor { latest_message_timestamp, }, ); + tracing::debug!( + "New downsampler rule enabled: key_expr={:?}, threshold={:?}", + rule.key_expr, + threshold + ); } Self { ke_id: Arc::new(Mutex::new(ke_id)), From 01d67def09fb4d384e4fbccaa4fe899e4315abce Mon Sep 17 00:00:00 2001 From: Julien 
Enoch Date: Fri, 12 Jul 2024 11:19:02 +0200 Subject: [PATCH 528/598] fix(zenoh-ext): update error log about timestamping (#1237) --- zenoh-ext/src/publication_cache.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 09a21f2e16..07d993e815 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -143,8 +143,8 @@ impl<'a> PublicationCache<'a> { if conf.session.hlc().is_none() { bail!( "Failed requirement for PublicationCache on {}: \ - the Session is not configured with 'add_timestamp=true'", - key_expr + the 'timestamping' setting must be enabled in the Zenoh configuration", + key_expr, ) } From afa1bdc8dbefb53162cbce08b276332e88425833 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Jul 2024 09:30:31 +0200 Subject: [PATCH 529/598] Mark replier_id accessor as unstable (#1226) * replier_id is marked as unstable * Fix test * Fix test --- zenoh/src/api/query.rs | 9 +++++++-- zenoh/src/api/session.rs | 8 +++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 8cf62344f2..f1807333c7 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -18,13 +18,16 @@ use std::{ time::Duration, }; +#[cfg(feature = "unstable")] use zenoh_config::ZenohId; use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::{CongestionControl, Parameters, ZenohIdProto}; +#[cfg(feature = "unstable")] +use zenoh_protocol::core::ZenohIdProto; +use zenoh_protocol::core::{CongestionControl, Parameters}; use zenoh_result::ZResult; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use super::{ builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo, selector::ZenohParameters, @@ -118,6 +121,7 @@ impl From for ReplyError { #[derive(Clone, Debug)] pub struct Reply { pub(crate) result: Result, + #[cfg(feature = 
"unstable")] pub(crate) replier_id: Option, } @@ -137,6 +141,7 @@ impl Reply { self.result } + #[zenoh_macros::unstable] /// Gets the id of the zenoh instance that answered this Reply. pub fn replier_id(&self) -> Option { self.replier_id.map(Into::into) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index f5890edc3a..8a5d9e746e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1760,6 +1760,7 @@ impl Session { self.task_controller .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { let state = self.state.clone(); + #[cfg(feature = "unstable")] let zid = self.runtime.zid(); async move { tokio::select! { @@ -1775,6 +1776,7 @@ impl Session { } (query.callback)(Reply { result: Err(Value::new("Timeout", Encoding::ZENOH_STRING).into()), + #[cfg(feature = "unstable")] replier_id: Some(zid.into()), }); } @@ -1874,6 +1876,7 @@ impl Session { tracing::debug!("Timeout on liveliness query {}! Send error and close.", id); (query.callback)(Reply { result: Err(Value::new("Timeout", Encoding::ZENOH_STRING).into()), + #[cfg(feature = "unstable")] replier_id: Some(zid.into()), }); } @@ -2238,6 +2241,7 @@ impl Primitives for Session { #[cfg(feature = "unstable")] attachment: None, }), + #[cfg(feature = "unstable")] replier_id: None, }; @@ -2404,8 +2408,9 @@ impl Primitives for Session { encoding: e.encoding.into(), }; let new_reply = Reply { - replier_id: e.ext_sinfo.map(|info| info.id.zid), result: Err(value.into()), + #[cfg(feature = "unstable")] + replier_id: e.ext_sinfo.map(|info| info.id.zid), }; callback(new_reply); } @@ -2487,6 +2492,7 @@ impl Primitives for Session { let sample = info.into_sample(key_expr.into_owned(), payload, attachment); let new_reply = Reply { result: Ok(sample), + #[cfg(feature = "unstable")] replier_id: None, }; let callback = From 952cef8f1583a1a75f586f00e76ca10c3671ddbb Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 16 Jul 2024 09:47:14 +0200 Subject: [PATCH 530/598] Make `rustls`, `rustls-webpki` and 
`webpki-roots` optional (#1238) --- io/zenoh-link-commons/Cargo.toml | 5 +++-- io/zenoh-link-commons/src/lib.rs | 1 + io/zenoh-links/zenoh-link-quic/Cargo.toml | 2 +- io/zenoh-links/zenoh-link-tls/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml index 12b70cad6d..7ec7c533d7 100644 --- a/io/zenoh-link-commons/Cargo.toml +++ b/io/zenoh-link-commons/Cargo.toml @@ -26,14 +26,15 @@ version = { workspace = true } [features] compression = [] +tls = ["dep:rustls", "dep:rustls-webpki", "dep:webpki-roots"] [dependencies] async-trait = { workspace = true } base64 = { workspace = true, optional = true } flume = { workspace = true } futures = { workspace = true } -rustls = { workspace = true } -rustls-webpki = { workspace = true } +rustls = { workspace = true, optional = true } +rustls-webpki = { workspace = true, optional = true } serde = { workspace = true, features = ["default"] } tokio = { workspace = true, features = [ "fs", diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 56d99806a2..46c0968f3f 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -21,6 +21,7 @@ extern crate alloc; mod listener; mod multicast; +#[cfg(feature = "tls")] pub mod tls; mod unicast; diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 1af2a253b8..ff634d9d15 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -49,7 +49,7 @@ x509-parser = { workspace = true } zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } -zenoh-link-commons = { workspace = true } +zenoh-link-commons = { workspace = true, features = ["tls"] } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } diff --git 
a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index a716c72c99..3bd357d1e4 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -43,7 +43,7 @@ webpki-roots = { workspace = true } zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } -zenoh-link-commons = { workspace = true } +zenoh-link-commons = { workspace = true, features = ["tls"] } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } From efffcb3a2862ba4c2ba6fcaf9c68a6d86bfcc960 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 16 Jul 2024 09:50:21 +0200 Subject: [PATCH 531/598] Fix bug in routers replying to peers liveliness queries (#1233) * Fix bug in routers replying to peers livaliness queries * Add liveliness checks to routing two_node_combination and three_node_combination tests * Fix stable build * Change delays * Remove put and get delays * Add liveliness sub checks to routing two_node_combination and three_node_combination tests --- zenoh/src/net/routing/hat/router/token.rs | 5 +- zenoh/tests/routing.rs | 243 +++++++++++++++++++--- 2 files changed, 220 insertions(+), 28 deletions(-) diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index c167c8df15..06d3a4b14f 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -948,7 +948,10 @@ pub(crate) fn declare_token_interest( aggregate: bool, send_declare: &mut SendDeclare, ) { - if mode.current() && face.whatami == WhatAmI::Client { + if mode.current() + && (face.whatami == WhatAmI::Client + || (face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer))) + { let interest_id = (!mode.future()).then_some(id); if let Some(res) = res.as_ref() { if aggregate { diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 
fd680ae545..f65c939533 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -20,7 +20,7 @@ use std::{ time::Duration, }; -use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use tokio_util::sync::CancellationToken; use zenoh::{ config::{ModeDependentValue, WhatAmI, WhatAmIMatcher}, prelude::*, @@ -32,9 +32,8 @@ use zenoh_result::bail; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; -const MSG_SIZE: [usize; 2] = [1_024, 131_072]; -// Maximal recipes to run at once -const PARALLEL_RECIPES: usize = 4; +#[cfg(feature = "unstable")] +const LIVELINESSGET_DELAY: Duration = Duration::from_millis(10); #[derive(Debug, Clone, PartialEq, Eq)] enum Task { @@ -42,6 +41,14 @@ enum Task { Sub(String, usize), Queryable(String, usize), Get(String, usize), + #[cfg(feature = "unstable")] + Liveliness(String), + #[cfg(feature = "unstable")] + LivelinessGet(String), + #[cfg(feature = "unstable")] + LivelinessLoop(String), + #[cfg(feature = "unstable")] + LivelinessSub(String), Sleep(Duration), Wait, Checkpoint, @@ -99,6 +106,22 @@ impl Task { println!("Pub task done."); } + // The Queryable task keeps replying to requested messages until all checkpoints are finished. + Self::Queryable(ke, payload_size) => { + let queryable = ztimeout!(session.declare_queryable(ke))?; + let payload = vec![0u8; *payload_size]; + + loop { + tokio::select! { + _ = token.cancelled() => break, + query = queryable.recv_async() => { + ztimeout!(query?.reply(ke.to_owned(), payload.clone()))?; + }, + } + } + println!("Queryable task done."); + } + // The Get task gets and checks if the incoming message matches the expected size until it receives enough counts. Self::Get(ke, expected_size) => { let mut counter = 0; @@ -133,20 +156,92 @@ impl Task { println!("Get got sufficient amount of messages. Done."); } - // The Queryable task keeps replying to requested messages until all checkpoints are finished. 
- Self::Queryable(ke, payload_size) => { - let queryable = ztimeout!(session.declare_queryable(ke))?; - let payload = vec![0u8; *payload_size]; + #[cfg(feature = "unstable")] + // The Liveliness task. + Self::Liveliness(ke) => { + let _liveliness = ztimeout!(session.liveliness().declare_token(ke))?; + + token.cancelled().await; + println!("Liveliness task done."); + } + + #[cfg(feature = "unstable")] + // The LivelinessGet task. + Self::LivelinessGet(ke) => { + let mut counter = 0; + while counter < MSG_COUNT { + tokio::select! { + _ = token.cancelled() => break, + replies = async { session.liveliness().get(ke).timeout(Duration::from_secs(10)).await } => { + let replies = replies?; + while let Ok(reply) = replies.recv_async().await { + if let Err(err) = reply.result() { + tracing::warn!( + "Sample got from {} failed to unwrap! Error: {:?}.", + ke, + err + ); + continue; + } + counter += 1; + } + tokio::time::sleep(LIVELINESSGET_DELAY).await; + } + } + } + println!("LivelinessGet got sufficient amount of messages. Done."); + } + + // The LivelinessLoop task. + #[cfg(feature = "unstable")] + Self::LivelinessLoop(ke) => { + let mut liveliness: Option = None; + loop { + match liveliness.take() { + Some(liveliness) => { + tokio::select! { + _ = token.cancelled() => break, + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {liveliness.undeclare().await}) => { + _ = res?; + } + } + } + None => { + tokio::select! { + _ = token.cancelled() => break, + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {session.liveliness().declare_token(ke) + .await + }) => { + liveliness = res?.ok(); + } + } + } + } + } + println!("LivelinessLoop task done."); + } + + #[cfg(feature = "unstable")] + // The LivelinessSub task. + Self::LivelinessSub(ke) => { + let sub = ztimeout!(session.liveliness().declare_subscriber(ke))?; + let mut counter = 0; loop { tokio::select! 
{ _ = token.cancelled() => break, - query = queryable.recv_async() => { - ztimeout!(query?.reply(ke.to_owned(), payload.clone()))?; - }, + res = sub.recv_async() => { + if res.is_ok() { + counter += 1; + if counter >= MSG_COUNT { + println!("LivelinessSub received sufficient amount of messages. Done."); + break; + } + } + } } } - println!("Queryable task done."); + println!("LivelinessSub task done."); } // Make the zenoh session sleep for a while. @@ -488,12 +583,21 @@ async fn static_failover_brokering() -> Result<()> { Result::Ok(()) } +#[cfg(feature = "unstable")] +use tokio_util::task::TaskTracker; +#[cfg(feature = "unstable")] +const MSG_SIZE: [usize; 2] = [1_024, 131_072]; +// Maximal recipes to run at once +#[cfg(feature = "unstable")] +const PARALLEL_RECIPES: usize = 4; + // All test cases varying in // 1. Message size: 2 (sizes) // 2. Mode: {Client, Peer} x {Client x Peer} x {Router} = 2 x 2 x 1 = 4 (cases) // 3. Spawning order (delay_in_secs for node1, node2, and node3) = 6 (cases) // // Total cases = 2 x 4 x 6 = 48 +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 9)] async fn three_node_combination() -> Result<()> { zenoh::try_init_log_from_env(); @@ -524,6 +628,10 @@ async fn three_node_combination() -> Result<()> { let ke_pubsub = format!("three_node_combination_keyexpr_pubsub_{idx}"); let ke_getqueryable = format!("three_node_combination_keyexpr_getqueryable_{idx}"); + let ke_getliveliness = + format!("three_node_combination_keyexpr_getliveliness_{idx}"); + let ke_subliveliness = + format!("three_node_combination_keyexpr_subliveliness_{idx}"); use rand::Rng; let mut rng = rand::thread_rng(); @@ -538,7 +646,7 @@ async fn three_node_combination() -> Result<()> { ..Default::default() }; - let (pub_node, queryable_node) = { + let (pub_node, queryable_node, liveliness_node, livelinessloop_node) = { let base = Node { mode: node1_mode, connect: vec![locator.clone()], @@ -554,7 +662,7 @@ async fn three_node_combination() 
-> Result<()> { )])]); pub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - let mut queryable_node = base; + let mut queryable_node = base.clone(); queryable_node.name = format!("Queryable {node1_mode}"); queryable_node.con_task = ConcurrentTask::from([SequentialTask::from([Task::Queryable( @@ -563,10 +671,31 @@ async fn three_node_combination() -> Result<()> { )])]); queryable_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - (pub_node, queryable_node) + let mut liveliness_node = base.clone(); + liveliness_node.name = format!("Liveliness {node1_mode}"); + liveliness_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::Liveliness( + ke_getliveliness.clone(), + )])]); + liveliness_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + let mut livelinessloop_node = base; + livelinessloop_node.name = format!("LivelinessLoop {node1_mode}"); + livelinessloop_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::LivelinessLoop( + ke_subliveliness.clone(), + )])]); + livelinessloop_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + ( + pub_node, + queryable_node, + liveliness_node, + livelinessloop_node, + ) }; - let (sub_node, get_node) = { + let (sub_node, get_node, livelinessget_node, livelinesssub_node) = { let base = Node { mode: node2_mode, connect: vec![locator], @@ -582,7 +711,7 @@ async fn three_node_combination() -> Result<()> { ])]); sub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - let mut get_node = base; + let mut get_node = base.clone(); get_node.name = format!("Get {node2_mode}"); get_node.con_task = ConcurrentTask::from([SequentialTask::from([ Task::Get(ke_getqueryable, msg_size), @@ -590,12 +719,30 @@ async fn three_node_combination() -> Result<()> { ])]); get_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - (sub_node, get_node) + let mut livelinessget_node = base.clone(); + livelinessget_node.name = format!("LivelinessGet {node2_mode}"); + 
livelinessget_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessGet(ke_getliveliness), + Task::Checkpoint, + ])]); + livelinessget_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + let mut livelinesssub_node = base; + livelinesssub_node.name = format!("LivelinessSub {node2_mode}"); + livelinesssub_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessSub(ke_subliveliness), + Task::Checkpoint, + ])]); + livelinesssub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + (sub_node, get_node, livelinessget_node, livelinesssub_node) }; ( Recipe::new([router_node.clone(), pub_node, sub_node]), - Recipe::new([router_node, queryable_node, get_node]), + Recipe::new([router_node.clone(), queryable_node, get_node]), + Recipe::new([router_node.clone(), liveliness_node, livelinessget_node]), + Recipe::new([router_node, livelinessloop_node, livelinesssub_node]), ) }, ) @@ -603,10 +750,12 @@ async fn three_node_combination() -> Result<()> { for chunks in recipe_list.chunks(4).map(|x| x.to_vec()) { let mut join_set = tokio::task::JoinSet::new(); - for (pubsub, getqueryable) in chunks { + for (pubsub, getqueryable, getliveliness, subliveliness) in chunks { join_set.spawn(async move { pubsub.run().await?; getqueryable.run().await?; + getliveliness.run().await?; + subliveliness.run().await?; Result::Ok(()) }); } @@ -625,6 +774,7 @@ async fn three_node_combination() -> Result<()> { // 2. 
Mode: {Client, Peer} x {Client, Peer} x {IsFirstListen} = 2 x 2 x 2 = 8 (modes) // // Total cases = 2 x 8 = 16 +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn two_node_combination() -> Result<()> { zenoh::try_init_log_from_env(); @@ -649,6 +799,8 @@ async fn two_node_combination() -> Result<()> { idx += 1; let ke_pubsub = format!("two_node_combination_keyexpr_pubsub_{idx}"); let ke_getqueryable = format!("two_node_combination_keyexpr_getqueryable_{idx}"); + let ke_subliveliness = format!("two_node_combination_keyexpr_subliveliness_{idx}"); + let ke_getliveliness = format!("two_node_combination_keyexpr_getliveliness_{idx}"); let (node1_listen_connect, node2_listen_connect) = { let locator = format!("tcp/127.0.0.1:{}", base_port + idx); @@ -662,7 +814,7 @@ async fn two_node_combination() -> Result<()> { } }; - let (pub_node, queryable_node) = { + let (pub_node, queryable_node, liveliness_node, livelinessloop_node) = { let base = Node { mode: node1_mode, listen: node1_listen_connect.0, @@ -677,7 +829,7 @@ async fn two_node_combination() -> Result<()> { msg_size, )])]); - let mut queryable_node = base; + let mut queryable_node = base.clone(); queryable_node.name = format!("Queryable {node1_mode}"); queryable_node.con_task = ConcurrentTask::from([SequentialTask::from([Task::Queryable( @@ -685,10 +837,29 @@ async fn two_node_combination() -> Result<()> { msg_size, )])]); - (pub_node, queryable_node) + let mut liveliness_node = base.clone(); + liveliness_node.name = format!("Liveliness {node1_mode}"); + liveliness_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::Liveliness( + ke_getliveliness.clone(), + )])]); + + let mut livelinessloop_node = base; + livelinessloop_node.name = format!("LivelinessLoop {node1_mode}"); + livelinessloop_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::LivelinessLoop( + ke_subliveliness.clone(), + )])]); + + ( + pub_node, + queryable_node, + 
liveliness_node, + livelinessloop_node, + ) }; - let (sub_node, get_node) = { + let (sub_node, get_node, livelinessget_node, livelinesssub_node) = { let base = Node { mode: node2_mode, listen: node2_listen_connect.0, @@ -703,29 +874,47 @@ async fn two_node_combination() -> Result<()> { Task::Checkpoint, ])]); - let mut get_node = base; + let mut get_node = base.clone(); get_node.name = format!("Get {node2_mode}"); get_node.con_task = ConcurrentTask::from([SequentialTask::from([ Task::Get(ke_getqueryable, msg_size), Task::Checkpoint, ])]); - (sub_node, get_node) + let mut livelinessget_node = base.clone(); + livelinessget_node.name = format!("LivelinessGet {node2_mode}"); + livelinessget_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessGet(ke_getliveliness), + Task::Checkpoint, + ])]); + + let mut livelinesssub_node = base; + livelinesssub_node.name = format!("LivelinessSub {node2_mode}"); + livelinesssub_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessSub(ke_subliveliness), + Task::Checkpoint, + ])]); + + (sub_node, get_node, livelinessget_node, livelinesssub_node) }; ( Recipe::new([pub_node, sub_node]), Recipe::new([queryable_node, get_node]), + Recipe::new([liveliness_node, livelinessget_node]), + Recipe::new([livelinessloop_node, livelinesssub_node]), ) }) .collect(); for chunks in recipe_list.chunks(PARALLEL_RECIPES).map(|x| x.to_vec()) { let task_tracker = TaskTracker::new(); - for (pubsub, getqueryable) in chunks { + for (pubsub, getqueryable, getlivelienss, subliveliness) in chunks { task_tracker.spawn(async move { pubsub.run().await?; getqueryable.run().await?; + getlivelienss.run().await?; + subliveliness.run().await?; Result::Ok(()) }); } From 44b1f2eefae48b099df051d289d4c84cc06e7c8a Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 16 Jul 2024 15:24:51 +0200 Subject: [PATCH 532/598] fix: fix keyexpr canonization unsafe behavior (#1191) * fix: fix keyexpr canonization unsafe behavior * fix: 
fix keyexpr canonization * Retrigger CI * refactor: use safe version of unimportant unsafe code --- commons/zenoh-keyexpr/src/key_expr/canon.rs | 172 ++++++++++---------- commons/zenoh-keyexpr/src/key_expr/utils.rs | 19 --- 2 files changed, 84 insertions(+), 107 deletions(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index 7080dbde1a..8187467004 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -12,114 +12,102 @@ // ZettaScale Zenoh Team, // use alloc::string::String; -use core::{slice, str}; - -use crate::key_expr::{ - utils::{Split, Writer}, - DELIMITER, DOUBLE_WILD, SINGLE_WILD, -}; pub trait Canonize { fn canonize(&mut self); } -const DOLLAR_STAR: &[u8; 2] = b"$*"; - -impl Canonize for &mut str { - fn canonize(&mut self) { - let mut writer = Writer { - ptr: self.as_mut_ptr(), - len: 0, - }; - if let Some(position) = self.find("$*$*") { - writer.len = position; - let mut need_final_write = true; - for between_dollarstar in self.as_bytes()[(position + 4)..].splitter(DOLLAR_STAR) { - need_final_write = between_dollarstar.is_empty(); - if !need_final_write { - writer.write(DOLLAR_STAR.as_ref()); - writer.write(between_dollarstar); - } - } - if need_final_write { - writer.write(DOLLAR_STAR.as_ref()) +// Return the length of the canonized string +fn canonize(bytes: &mut [u8]) -> usize { + let mut index = 0; + let mut written = 0; + let mut double_wild = false; + loop { + match &bytes[index..] { + [b'*', b'*'] => { + bytes[written..written + 2].copy_from_slice(b"**"); + written += 2; + return written; } - *self = unsafe { - str::from_utf8_unchecked_mut(slice::from_raw_parts_mut(writer.ptr, writer.len)) + [b'*', b'*', b'/', ..] 
=> { + double_wild = true; + index += 3; } - } - writer.len = 0; - let mut ke = self.as_bytes().splitter(&b'/'); - let mut in_big_wild = false; - - for chunk in ke.by_ref() { - if chunk.is_empty() { - break; - } - if in_big_wild { - match chunk { - [SINGLE_WILD] | b"$*" => { - writer.write_byte(b'*'); - break; - } - DOUBLE_WILD => continue, - _ => { - writer.write(b"**/"); - writer.write(chunk); - in_big_wild = false; - break; + [b'*', r @ ..] | [b'$', b'*', r @ ..] if r.is_empty() || r.starts_with(b"/") => { + let (end, len) = (!r.starts_with(b"/"), r.len()); + bytes[written] = b'*'; + written += 1; + if end { + if double_wild { + bytes[written..written + 3].copy_from_slice(b"/**"); + written += 3; } + return written; } - } else if chunk == DOUBLE_WILD { - in_big_wild = true; - continue; - } else { - writer.write(if chunk == b"$*" { b"*" } else { chunk }); - break; + bytes[written] = b'/'; + written += 1; + index = bytes.len() - len + 1; } - } - for chunk in ke { - if chunk.is_empty() { - writer.write_byte(b'/'); - continue; + // Handle chunks with only repeated "$*" + [b'$', b'*', b'$', b'*', ..] => { + index += 2; } - if in_big_wild { - match chunk { - [SINGLE_WILD] | b"$*" => { - writer.write(b"/*"); - } - DOUBLE_WILD => {} - _ => { - writer.write(b"/**/"); - writer.write(chunk); - in_big_wild = false; + _ => { + if double_wild && &bytes[index..] != b"**" { + bytes[written..written + 3].copy_from_slice(b"**/"); + written += 3; + double_wild = false; + } + let mut write_start = index; + loop { + match bytes.get(index) { + Some(b'/') => { + index += 1; + bytes.copy_within(write_start..index, written); + written += index - write_start; + break; + } + Some(b'$') if matches!(bytes.get(index + 1..index + 4), Some(b"*$*")) => { + index += 2; + bytes.copy_within(write_start..index, written); + written += index - write_start; + let skip = bytes[index + 4..] 
+ .windows(2) + .take_while(|s| s == b"$*") + .count(); + index += (1 + skip) * 2; + write_start = index; + } + Some(_) => index += 1, + None => { + bytes.copy_within(write_start..index, written); + written += index - write_start; + return written; + } } } - } else if chunk == DOUBLE_WILD { - in_big_wild = true; - } else { - writer.write_byte(DELIMITER); - writer.write(if chunk == b"$*" { b"*" } else { chunk }); - } - } - if in_big_wild { - if writer.len != 0 { - writer.write_byte(DELIMITER); } - writer.write(DOUBLE_WILD) - } - *self = unsafe { - str::from_utf8_unchecked_mut(slice::from_raw_parts_mut(writer.ptr, writer.len)) } } } +impl Canonize for &mut str { + fn canonize(&mut self) { + // SAFETY: canonize leave an UTF8 string within the returned length, + // and remaining garbage bytes are zeroed + let bytes = unsafe { self.as_bytes_mut() }; + let length = canonize(bytes); + bytes[length..].fill(b'\0'); + } +} + impl Canonize for String { fn canonize(&mut self) { - let mut s = self.as_mut(); - s.canonize(); - let len = s.len(); - self.truncate(len); + // SAFETY: canonize leave an UTF8 string within the returned length, + // and remaining garbage bytes are truncated + let bytes = unsafe { self.as_mut_vec() }; + let length = canonize(bytes); + bytes.truncate(length); } } @@ -150,6 +138,9 @@ fn canonizer() { let mut s = String::from("hello/**/**/bye"); s.canonize(); assert_eq!(s, "hello/**/bye"); + let mut s = String::from("hello/**/**"); + s.canonize(); + assert_eq!(s, "hello/**"); // Any $* chunk is replaced by a * chunk let mut s = String::from("hello/$*/bye"); @@ -172,4 +163,9 @@ fn canonizer() { let mut s = String::from("hello/**/*"); s.canonize(); assert_eq!(s, "hello/*/**"); + + // &mut str remaining part is zeroed + let mut s = String::from("$*$*$*/hello/$*$*/bye/$*$*"); + s.as_mut_str().canonize(); + assert_eq!(s, "*/hello/*/bye/*\0\0\0\0\0\0\0\0\0\0\0"); } diff --git a/commons/zenoh-keyexpr/src/key_expr/utils.rs 
b/commons/zenoh-keyexpr/src/key_expr/utils.rs index 628477174a..63f4b4c088 100644 --- a/commons/zenoh-keyexpr/src/key_expr/utils.rs +++ b/commons/zenoh-keyexpr/src/key_expr/utils.rs @@ -11,25 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use core::ptr; - -pub(crate) struct Writer { - pub ptr: *mut u8, - pub len: usize, -} - -impl Writer { - pub(crate) fn write(&mut self, slice: &[u8]) { - let len = slice.len(); - unsafe { ptr::copy(slice.as_ptr(), self.ptr.add(self.len), len) }; - self.len += len - } - pub(crate) fn write_byte(&mut self, byte: u8) { - unsafe { *self.ptr.add(self.len) = byte }; - self.len += 1 - } -} - #[derive(Debug)] pub struct Splitter<'a, S: ?Sized, D: ?Sized> { s: Option<&'a S>, From 866a6ad48784040213d9e6eb08df3ced3f65e4a3 Mon Sep 17 00:00:00 2001 From: eclipse-zenoh-bot Date: Tue, 16 Jul 2024 13:44:09 +0000 Subject: [PATCH 533/598] chore: Sync Rust toolchain --- Cargo.toml | 2 +- rust-toolchain.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ce8ae1643d..e7eef7248a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ members = [ exclude = ["ci/nostd-check", "ci/valgrind-check"] [workspace.package] -rust-version = "1.72.0" +rust-version = "1.75" version = "0.11.0-dev" # Zenoh version repository = "https://github.com/eclipse-zenoh/zenoh" homepage = "http://zenoh.io" diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 4dd8e5c567..a9aa3929e9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.75.0" \ No newline at end of file +channel = "1.75" \ No newline at end of file From a97f6e54457fc57f11ccad262070d74cb34e4067 Mon Sep 17 00:00:00 2001 From: eclipse-zenoh-bot Date: Tue, 16 Jul 2024 13:46:06 +0000 Subject: [PATCH 534/598] chore: Sync Rust toolchain --- Cargo.toml | 2 +- rust-toolchain.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e7eef7248a..a3a370971b 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ members = [ exclude = ["ci/nostd-check", "ci/valgrind-check"] [workspace.package] -rust-version = "1.75" +rust-version = "1.75.0" version = "0.11.0-dev" # Zenoh version repository = "https://github.com/eclipse-zenoh/zenoh" homepage = "http://zenoh.io" diff --git a/rust-toolchain.toml b/rust-toolchain.toml index a9aa3929e9..4dd8e5c567 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.75" \ No newline at end of file +channel = "1.75.0" \ No newline at end of file From 7f8b85471c2846e73fe7adc6f003e732b6a316c7 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 18 Jul 2024 15:09:47 +0300 Subject: [PATCH 535/598] Null value opts for shm (#1241) * adopt some SHM data structures to support NPO (needed as elegant way to equalize some data structure sizes in zenoh-c) * Update commons/zenoh-shm/src/api/provider/types.rs wyfo's addition Co-authored-by: Joseph Perez * Review fixes * fix examples * fiix CI * fix typo * review fix --------- Co-authored-by: Joseph Perez --- commons/zenoh-codec/src/core/shm.rs | 29 ++++++- commons/zenoh-shm/Cargo.toml | 2 +- .../posix/posix_shm_provider_backend.rs | 37 +++++---- .../posix/posix_shm_segment.rs | 6 +- commons/zenoh-shm/src/api/provider/chunk.rs | 6 +- .../src/api/provider/shm_provider.rs | 20 +++-- .../src/api/provider/shm_provider_backend.rs | 7 +- commons/zenoh-shm/src/api/provider/types.rs | 78 ++++++++++++------- commons/zenoh-shm/src/lib.rs | 15 ++-- examples/examples/z_alloc_shm.rs | 6 +- 10 files changed, 130 insertions(+), 76 deletions(-) diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 4f272f0ed4..b67716611d 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -11,6 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::num::NonZeroUsize; + use zenoh_buffers::{ 
reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -62,6 +64,18 @@ where } } +impl WCodec for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: NonZeroUsize) -> Self::Output { + self.write(&mut *writer, x.get())?; + Ok(()) + } +} + impl WCodec<&ShmBufInfo, &mut W> for Zenoh080 where W: Writer, @@ -80,7 +94,7 @@ where self.write(&mut *writer, data_descriptor)?; self.write(&mut *writer, shm_protocol)?; - self.write(&mut *writer, data_len)?; + self.write(&mut *writer, *data_len)?; self.write(&mut *writer, watchdog_descriptor)?; self.write(&mut *writer, header_descriptor)?; self.write(&mut *writer, generation)?; @@ -138,6 +152,19 @@ where } } +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let size: usize = self.read(&mut *reader)?; + let size = NonZeroUsize::new(size).ok_or(DidntRead)?; + Ok(size) + } +} + impl RCodec for Zenoh080 where R: Reader, diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index a76cf896d3..5e3dec390e 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -52,4 +52,4 @@ lockfree = { workspace = true } stabby = { workspace = true } [dev-dependencies] -libc = { workspace = true } +libc = { workspace = true } \ No newline at end of file diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs index 7de9e9f22d..663379e034 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs @@ -16,6 +16,7 @@ use std::{ borrow::Borrow, cmp, collections::BinaryHeap, + num::NonZeroUsize, sync::{ atomic::{AtomicPtr, AtomicUsize, Ordering}, Mutex, @@ -31,7 +32,7 @@ use crate::api::{ provider::{ 
chunk::{AllocatedChunk, ChunkDescriptor}, shm_provider_backend::ShmProviderBackend, - types::{AllocAlignment, ChunkAllocResult, MemoryLayout, ZAllocError}, + types::{AllocAlignment, ChunkAllocResult, MemoryLayout, ZAllocError, ZLayoutError}, }, }; @@ -45,7 +46,7 @@ const MIN_FREE_CHUNK_SIZE: usize = 1_024; #[derive(Eq, Copy, Clone, Debug)] struct Chunk { offset: ChunkID, - size: usize, + size: NonZeroUsize, } impl Ord for Chunk { @@ -86,7 +87,7 @@ impl PosixShmProviderBackendBuilder { self, size: usize, alignment: AllocAlignment, - ) -> ZResult> { + ) -> Result, ZLayoutError> { let layout = MemoryLayout::new(size, alignment)?; Ok(LayoutedPosixShmProviderBackendBuilder { layout }) } @@ -96,7 +97,7 @@ impl PosixShmProviderBackendBuilder { pub fn with_size( self, size: usize, - ) -> ZResult> { + ) -> Result, ZLayoutError> { let layout = MemoryLayout::new(size, AllocAlignment::default())?; Ok(LayoutedPosixShmProviderBackendBuilder { layout }) } @@ -149,7 +150,7 @@ impl PosixShmProviderBackend { ); Ok(Self { - available: AtomicUsize::new(layout.size()), + available: AtomicUsize::new(layout.size().get()), segment, free_list: Mutex::new(free_list), alignment: layout.alignment(), @@ -163,7 +164,7 @@ impl ShmProviderBackend for PosixShmProviderBackend { let required_len = layout.size(); - if self.available.load(Ordering::Relaxed) < required_len { + if self.available.load(Ordering::Relaxed) < required_len.get() { tracing::trace!( "PosixShmProviderBackend does not have sufficient free memory to allocate {:?}, try de-fragmenting!", layout); return Err(ZAllocError::OutOfMemory); } @@ -176,16 +177,20 @@ impl ShmProviderBackend for PosixShmProviderBackend { Some(mut chunk) if chunk.size >= required_len => { // NOTE: don't loose any chunks here, as it will lead to memory leak tracing::trace!("Allocator selected Chunk ({:?})", &chunk); - if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { + if chunk.size.get() - required_len.get() >= MIN_FREE_CHUNK_SIZE { let free_chunk = Chunk { 
- offset: chunk.offset + required_len as ChunkID, - size: chunk.size - required_len, + offset: chunk.offset + required_len.get() as ChunkID, + // SAFETY: this is safe because we always operate on a leftover, which is checked above! + size: unsafe { + NonZeroUsize::new_unchecked(chunk.size.get() - required_len.get()) + }, }; tracing::trace!("The allocation will leave a Free Chunk: {:?}", &free_chunk); guard.push(free_chunk); chunk.size = required_len; } - self.available.fetch_sub(chunk.size, Ordering::Relaxed); + self.available + .fetch_sub(chunk.size.get(), Ordering::Relaxed); let descriptor = ChunkDescriptor::new(self.segment.segment.id(), chunk.offset, chunk.size); @@ -219,16 +224,18 @@ impl ShmProviderBackend for PosixShmProviderBackend { offset: chunk.chunk, size: chunk.len, }; - self.available.fetch_add(free_chunk.size, Ordering::Relaxed); + self.available + .fetch_add(free_chunk.size.get(), Ordering::Relaxed); zlock!(self.free_list).push(free_chunk); } fn defragment(&self) -> usize { fn try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option { - let end_offset = a.offset as usize + a.size; + let end_offset = a.offset as usize + a.size.get(); if end_offset == b.offset as usize { Some(Chunk { - size: a.size + b.size, + // SAFETY: this is safe because we operate on non-zero sizes and it will never overflow + size: unsafe { NonZeroUsize::new_unchecked(a.size.get() + b.size.get()) }, offset: a.offset, }) } else { @@ -256,7 +263,7 @@ impl ShmProviderBackend for PosixShmProviderBackend { match try_merge_adjacent_chunks(¤t, &next) { Some(c) => { current = c; - largest = largest.max(current.size); + largest = largest.max(current.size.get()); if i == n { guard.push(current) } @@ -279,7 +286,7 @@ impl ShmProviderBackend for PosixShmProviderBackend { self.available.load(Ordering::Relaxed) } - fn layout_for(&self, layout: MemoryLayout) -> ZResult { + fn layout_for(&self, layout: MemoryLayout) -> Result { layout.extend(self.alignment) } } diff --git 
a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs index dd103462e4..3a08d2be55 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -use std::sync::atomic::AtomicPtr; +use std::{num::NonZeroUsize, sync::atomic::AtomicPtr}; use zenoh_result::ZResult; @@ -32,8 +32,8 @@ pub(crate) struct PosixShmSegment { } impl PosixShmSegment { - pub(crate) fn create(alloc_size: usize) -> ZResult { - let segment = ArrayInSHM::create(alloc_size, POSIX_SHM_SEGMENT_PREFIX)?; + pub(crate) fn create(alloc_size: NonZeroUsize) -> ZResult { + let segment = ArrayInSHM::create(alloc_size.get(), POSIX_SHM_SEGMENT_PREFIX)?; Ok(Self { segment }) } diff --git a/commons/zenoh-shm/src/api/provider/chunk.rs b/commons/zenoh-shm/src/api/provider/chunk.rs index 939758a345..fe7d0d5cb6 100644 --- a/commons/zenoh-shm/src/api/provider/chunk.rs +++ b/commons/zenoh-shm/src/api/provider/chunk.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -use std::sync::atomic::AtomicPtr; +use std::{num::NonZeroUsize, sync::atomic::AtomicPtr}; use crate::api::common::types::{ChunkID, SegmentID}; @@ -22,13 +22,13 @@ use crate::api::common::types::{ChunkID, SegmentID}; pub struct ChunkDescriptor { pub segment: SegmentID, pub chunk: ChunkID, - pub len: usize, + pub len: NonZeroUsize, } impl ChunkDescriptor { /// Create a new Chunk Descriptor #[zenoh_macros::unstable_doc] - pub fn new(segment: SegmentID, chunk: ChunkID, len: usize) -> Self { + pub fn new(segment: SegmentID, chunk: ChunkID, len: NonZeroUsize) -> Self { Self { segment, chunk, diff --git a/commons/zenoh-shm/src/api/provider/shm_provider.rs b/commons/zenoh-shm/src/api/provider/shm_provider.rs index 8773498b61..1487a1ee18 100644 --- a/commons/zenoh-shm/src/api/provider/shm_provider.rs +++ 
b/commons/zenoh-shm/src/api/provider/shm_provider.rs @@ -16,6 +16,7 @@ use std::{ collections::VecDeque, future::{Future, IntoFuture}, marker::PhantomData, + num::NonZeroUsize, pin::Pin, sync::{atomic::Ordering, Arc, Mutex}, time::Duration, @@ -159,7 +160,7 @@ where IDSource: ProtocolIDSource, Backend: ShmProviderBackend, { - size: usize, + size: NonZeroUsize, provider_layout: MemoryLayout, provider: &'a ShmProvider, } @@ -185,6 +186,7 @@ where // Create layout for specified arguments let layout = MemoryLayout::new(data.size, data.alignment) .map_err(|_| ZLayoutError::IncorrectLayoutArgs)?; + let size = layout.size(); // Obtain provider's layout for our layout let provider_layout = data @@ -194,7 +196,7 @@ where .map_err(|_| ZLayoutError::ProviderIncompatibleLayout)?; Ok(Self { - size: data.size, + size, provider_layout, provider: data.provider, }) @@ -320,7 +322,7 @@ where let result = InnerPolicy::alloc(layout, provider); if let Err(ZAllocError::OutOfMemory) = result { // try to alloc again only if GC managed to reclaim big enough chunk - if provider.garbage_collect() >= layout.size() { + if provider.garbage_collect() >= layout.size().get() { return AltPolicy::alloc(layout, provider); } } @@ -352,7 +354,7 @@ where let result = InnerPolicy::alloc(layout, provider); if let Err(ZAllocError::NeedDefragment) = result { // try to alloc again only if big enough chunk was defragmented - if provider.defragment() >= layout.size() { + if provider.defragment() >= layout.size().get() { return AltPolicy::alloc(layout, provider); } } @@ -803,6 +805,8 @@ where /// Remember that chunk's len may be >= len! 
#[zenoh_macros::unstable_doc] pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + let len = len.try_into()?; + // allocate resources for SHM buffer let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; @@ -837,7 +841,7 @@ where if is_free_chunk(maybe_free) { tracing::trace!("Garbage Collecting Chunk: {:?}", maybe_free); self.backend.free(&maybe_free.descriptor); - largest = largest.max(maybe_free.descriptor.len); + largest = largest.max(maybe_free.descriptor.len.get()); return false; } true @@ -868,7 +872,7 @@ where } } - fn alloc_inner(&self, size: usize, layout: &MemoryLayout) -> BufAllocResult + fn alloc_inner(&self, size: NonZeroUsize, layout: &MemoryLayout) -> BufAllocResult where Policy: AllocPolicy, { @@ -914,7 +918,7 @@ where fn wrap( &self, chunk: AllocatedChunk, - len: usize, + len: NonZeroUsize, allocated_header: AllocatedHeaderDescriptor, allocated_watchdog: AllocatedWatchdog, confirmed_watchdog: ConfirmedDescriptor, @@ -971,7 +975,7 @@ where { async fn alloc_inner_async( &self, - size: usize, + size: NonZeroUsize, backend_layout: &MemoryLayout, ) -> BufAllocResult where diff --git a/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs index 933940cac1..51795f5880 100644 --- a/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs +++ b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs @@ -11,12 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // - -use zenoh_result::ZResult; - use super::{ chunk::ChunkDescriptor, - types::{ChunkAllocResult, MemoryLayout}, + types::{ChunkAllocResult, MemoryLayout, ZLayoutError}, }; /// The provider backend trait @@ -48,5 +45,5 @@ pub trait ShmProviderBackend { /// - validate, if the provided layout can be used with this backend /// - adopt the layout for backend capabilities #[zenoh_macros::unstable_doc] - fn layout_for(&self, layout: MemoryLayout) -> ZResult; + fn layout_for(&self, 
layout: MemoryLayout) -> Result; } diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index 603c4a481a..bb04dfa5fc 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // -use std::fmt::Display; - -use zenoh_result::{bail, ZResult}; +use std::{fmt::Display, num::NonZeroUsize}; use super::chunk::AllocatedChunk; use crate::api::buffer::zshmmut::ZShmMut; @@ -60,15 +58,24 @@ impl Default for AllocAlignment { } impl AllocAlignment { + /// Try to create a new AllocAlignment from alignment representation in powers of 2. + /// + /// # Errors + /// + /// This function will return an error if provided alignment power cannot fit into usize. #[zenoh_macros::unstable_doc] - pub fn new(pow: u8) -> Self { - Self { pow } + pub const fn new(pow: u8) -> Result { + match pow { + pow if pow < usize::BITS as u8 => Ok(Self { pow }), + _ => Err(ZLayoutError::IncorrectLayoutArgs), + } } /// Get alignment in normal units (bytes) #[zenoh_macros::unstable_doc] - pub fn get_alignment_value(&self) -> usize { - 1usize << self.pow + pub fn get_alignment_value(&self) -> NonZeroUsize { + // SAFETY: this is safe because we limit pow in new based on usize size + unsafe { NonZeroUsize::new_unchecked(1usize << self.pow) } } /// Align size according to inner alignment. 
@@ -78,17 +85,25 @@ impl AllocAlignment { /// ``` /// use zenoh_shm::api::provider::types::AllocAlignment; /// - /// let alignment = AllocAlignment::new(2); // 4-byte alignment - /// let initial_size: usize = 7; + /// let alignment = AllocAlignment::new(2).unwrap(); // 4-byte alignment + /// let initial_size = 7.try_into().unwrap(); /// let aligned_size = alignment.align_size(initial_size); - /// assert_eq!(aligned_size, 8); + /// assert_eq!(aligned_size.get(), 8); /// ``` #[zenoh_macros::unstable_doc] - pub fn align_size(&self, size: usize) -> usize { + pub fn align_size(&self, size: NonZeroUsize) -> NonZeroUsize { let alignment = self.get_alignment_value(); - match size % alignment { + match size.get() % alignment { 0 => size, - remainder => size + (alignment - remainder), + // SAFETY: + // This unsafe block is always safe: + // 1. 0 < remainder < alignment + // 2. because of 1, the value of (alignment.get() - remainder) is always > 0 + // 3. because of 2, we add nonzero size to nonzero (alignment.get() - remainder) and it is always positive if no overflow + // 4. we make sure that there is no overflow condition in 3 by means of alignment limitation in `new` by limiting pow value + remainder => unsafe { + NonZeroUsize::new_unchecked(size.get() + (alignment.get() - remainder)) + }, } } } @@ -97,7 +112,7 @@ impl AllocAlignment { #[zenoh_macros::unstable_doc] #[derive(Debug)] pub struct MemoryLayout { - size: usize, + size: NonZeroUsize, alignment: AllocAlignment, } @@ -111,18 +126,29 @@ impl Display for MemoryLayout { } impl MemoryLayout { - /// Try to create a new memory layout + /// Try to create a new memory layout. + /// + /// # Errors + /// + /// This function will return an error if zero size have passed or if the provided size is not the multiply of the alignment. 
#[zenoh_macros::unstable_doc] - pub fn new(size: usize, alignment: AllocAlignment) -> ZResult { + pub fn new(size: T, alignment: AllocAlignment) -> Result + where + T: TryInto, + { + let Ok(size) = size.try_into() else { + return Err(ZLayoutError::IncorrectLayoutArgs); + }; + // size of an allocation must be a miltiple of it's alignment! - match size % alignment.get_alignment_value() { + match size.get() % alignment.get_alignment_value() { 0 => Ok(Self { size, alignment }), - _ => bail!("size of an allocation must be a miltiple of it's alignment!"), + _ => Err(ZLayoutError::IncorrectLayoutArgs), } } #[zenoh_macros::unstable_doc] - pub fn size(&self) -> usize { + pub fn size(&self) -> NonZeroUsize { self.size } @@ -139,27 +165,23 @@ impl MemoryLayout { /// use zenoh_shm::api::provider::types::MemoryLayout; /// /// // 8 bytes with 4-byte alignment - /// let layout4b = MemoryLayout::new(8, AllocAlignment::new(2)).unwrap(); + /// let layout4b = MemoryLayout::new(8, AllocAlignment::new(2).unwrap()).unwrap(); /// /// // Try to realign with 2-byte alignment - /// let layout2b = layout4b.extend(AllocAlignment::new(1)); + /// let layout2b = layout4b.extend(AllocAlignment::new(1).unwrap()); /// assert!(layout2b.is_err()); // fails because new alignment must be >= old /// /// // Try to realign with 8-byte alignment - /// let layout8b = layout4b.extend(AllocAlignment::new(3)); + /// let layout8b = layout4b.extend(AllocAlignment::new(3).unwrap()); /// assert!(layout8b.is_ok()); // ok /// ``` #[zenoh_macros::unstable_doc] - pub fn extend(&self, new_alignment: AllocAlignment) -> ZResult { + pub fn extend(&self, new_alignment: AllocAlignment) -> Result { if self.alignment <= new_alignment { let new_size = new_alignment.align_size(self.size); return MemoryLayout::new(new_size, new_alignment); } - bail!( - "Cannot extend alignment form {} to {}: new alignment must be >= old!", - self.alignment, - new_alignment - ) + Err(ZLayoutError::IncorrectLayoutArgs) } } diff --git 
a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 19f8a1c76f..8ec2458931 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -13,6 +13,7 @@ // use std::{ any::Any, + num::NonZeroUsize, sync::{ atomic::{AtomicPtr, Ordering}, Arc, @@ -62,7 +63,7 @@ pub struct ShmBufInfo { /// Actual data length /// NOTE: data_descriptor's len is >= of this len and describes the actual memory length /// dedicated in shared memory segment for this particular buffer. - pub data_len: usize, + pub data_len: NonZeroUsize, /// The watchdog descriptor pub watchdog_descriptor: Descriptor, @@ -76,7 +77,7 @@ impl ShmBufInfo { pub fn new( data_descriptor: ChunkDescriptor, shm_protocol: ProtocolID, - data_len: usize, + data_len: NonZeroUsize, watchdog_descriptor: Descriptor, header_descriptor: HeaderDescriptor, generation: u32, @@ -122,14 +123,10 @@ impl std::fmt::Debug for ShmBufInner { } impl ShmBufInner { - pub fn len(&self) -> usize { + pub fn len(&self) -> NonZeroUsize { self.info.data_len } - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - fn is_valid(&self) -> bool { self.header.header().generation.load(Ordering::SeqCst) == self.info.generation } @@ -156,7 +153,7 @@ impl ShmBufInner { fn as_slice(&self) -> &[u8] { tracing::trace!("ShmBufInner::as_slice() == len = {:?}", self.info.data_len); let bp = self.buf.load(Ordering::SeqCst); - unsafe { std::slice::from_raw_parts(bp, self.info.data_len) } + unsafe { std::slice::from_raw_parts(bp, self.info.data_len.get()) } } unsafe fn dec_ref_count(&self) { @@ -176,7 +173,7 @@ impl ShmBufInner { /// guarantee that your in applications only one process at the time will actually write. 
unsafe fn as_mut_slice_inner(&mut self) -> &mut [u8] { let bp = self.buf.load(Ordering::SeqCst); - std::slice::from_raw_parts_mut(bp, self.info.data_len) + std::slice::from_raw_parts_mut(bp, self.info.data_len.get()) } } diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index eceb74f35b..e96ca7dab1 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -54,7 +54,7 @@ async fn run() -> ZResult<()> { // OPTION: Allocation with custom alignment and alloc policy customization let _comprehensive = provider .alloc(512) - .with_alignment(AllocAlignment::new(2)) + .with_alignment(AllocAlignment::new(2).unwrap()) // for more examples on policies, please see allocation policy usage below (for layout allocation API) .with_policy::() .wait() @@ -63,7 +63,7 @@ async fn run() -> ZResult<()> { // OPTION: Allocation with custom alignment and async alloc policy let _async = provider .alloc(512) - .with_alignment(AllocAlignment::new(2)) + .with_alignment(AllocAlignment::new(2).unwrap()) // for more examples on policies, please see allocation policy usage below (for layout allocation API) .with_policy::>>() .await @@ -83,7 +83,7 @@ async fn run() -> ZResult<()> { // OPTION: Comprehensive configuration: let _comprehensive_layout = provider .alloc(512) - .with_alignment(AllocAlignment::new(2)) + .with_alignment(AllocAlignment::new(2).unwrap()) .into_layout() .unwrap(); From 8529eb637e4b969813aa57ff5e2f928a7cdaaf16 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 18 Jul 2024 15:19:48 +0300 Subject: [PATCH 536/598] Fix establish failure on shm (#1242) * adopt some SHM data structures to support NPO (needed as elegant way to equalize some data structure sizes in zenoh-c) * Update commons/zenoh-shm/src/api/provider/types.rs wyfo's addition Co-authored-by: Joseph Perez * Review fixes * fix examples * fiix CI * fix typo * review fix * Do not fail the whole Establish when 
failing on shm segment check --------- Co-authored-by: Joseph Perez --- io/zenoh-transport/src/unicast/establishment/ext/shm.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 1a6f272d42..e2068af94a 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -418,7 +418,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { }; // Read Alice's SHM Segment - let alice_segment = AuthSegment::open(init_syn.alice_segment)?; + let alice_segment = match AuthSegment::open(init_syn.alice_segment) { + Ok(buff) => buff, + Err(e) => { + tracing::trace!("{} {}", S, e); + return Ok(None); + } + }; Ok(Some(alice_segment)) } From c726e1312b842703c33a3826629885ddd8bf1afe Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 18 Jul 2024 18:05:58 +0300 Subject: [PATCH 537/598] Do not trigger transport error in case of SHM buffer invalidation (#1245) * Do not trigger transport error in case of SHM buffer invalidation * Fix spelling * Drop the whole ZBuf in case of SHM error! 
--- io/zenoh-transport/src/multicast/rx.rs | 7 ++++++- io/zenoh-transport/src/shm.rs | 3 +-- io/zenoh-transport/src/unicast/lowlatency/rx.rs | 7 ++++++- io/zenoh-transport/src/unicast/universal/rx.rs | 7 ++++++- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 9a6cdb0d4d..d8a6aaeb02 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -13,6 +13,8 @@ // use std::sync::MutexGuard; +#[cfg(feature = "shared-memory")] +use tracing::error; use zenoh_core::{zlock, zread}; use zenoh_protocol::{ core::{Locator, Priority, Reliability}, @@ -44,7 +46,10 @@ impl TransportMulticastInner { #[cfg(feature = "shared-memory")] { if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; + if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { + error!("Error receiving SHM buffer: {e}"); + return Ok(()); + } } } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 8450ad878e..c562e47135 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -334,8 +334,7 @@ pub fn map_zslice_to_shmbuf(zslice: &mut ZSlice, shmr: &ShmReader) -> ZResult<() let smb = shmr.read_shmbuf(&shmbinfo)?; // Replace the content of the slice - let zs: ZSlice = smb.into(); - *zslice = zs; + *zslice = smb.into(); Ok(()) } diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index c82e172c7b..0484a4a028 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -11,6 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(feature = "shared-memory")] +use tracing::error; use zenoh_buffers::{ reader::{HasReader, Reader}, ZSlice, @@ -37,7 +39,10 @@ impl TransportUnicastLowlatency { #[cfg(feature = "shared-memory")] { if 
self.config.shm.is_some() { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; + if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { + error!("Error receiving SHM buffer: {e}"); + return Ok(()); + } } } callback.handle_message(msg) diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index f97f29b0c7..71e674bdb8 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -13,6 +13,8 @@ // use std::sync::MutexGuard; +#[cfg(feature = "shared-memory")] +use tracing::error; use zenoh_core::{zlock, zread}; use zenoh_link::Link; use zenoh_protocol::{ @@ -45,7 +47,10 @@ impl TransportUnicastUniversal { #[cfg(feature = "shared-memory")] { if self.config.shm.is_some() { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; + if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { + error!("Error receiving SHM buffer: {e}"); + return Ok(()); + } } } callback.handle_message(msg) From b31a41027684bfb92640d73aa77a26a60d2e0ff1 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 19 Jul 2024 09:42:31 +0200 Subject: [PATCH 538/598] Configure `scouting/*/autoconnect/*` with a sequence (#1224) * Configure `scouting/*/autoconnect` with a sequence * Fix `zenoh/tests/routing.rs` * Fix `zenoh/tests/routing.rs` (again) * Fix zenoh-config tests * Fix `scouting/multicast/autoconnect/router` in default config * Keep string representation * Fix `ModeDependentValue` de impl * Remove `#[serde(deserialize_with = "treat_error_as_none")]` This attribute makes it hard to debug certain errors and was only present on autoconnect fields. 
--- DEFAULT_CONFIG.json5 | 25 +++++----- commons/zenoh-config/src/lib.rs | 15 +----- commons/zenoh-config/src/mode_dependent.rs | 6 +-- commons/zenoh-protocol/src/core/whatami.rs | 54 +++++++++++----------- zenoh/tests/routing.rs | 5 +- 5 files changed, 45 insertions(+), 60 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 1e9921bbe3..bf9fdd3f22 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -14,7 +14,7 @@ /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", - location: "Penny Lane" + location: "Penny Lane", }, /// Which endpoints to connect to. E.g. tcp/localhost:7447. @@ -27,7 +27,7 @@ /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: { router: -1, peer: -1, client: 0 }, - /// The list of endpoints to connect to. + /// The list of endpoints to connect to. /// Accepts a single list (e.g. endpoints: ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"]) /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/10.10.10.10:7447"], peer: ["tcp/11.11.11.11:7447"] }). endpoints: [ @@ -64,7 +64,7 @@ /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: 0, - /// The list of endpoints to listen on. + /// The list of endpoints to listen on. /// Accepts a single list (e.g. endpoints: ["tcp/[::]:7447", "udp/[::]:7447"]) /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }). endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }, @@ -104,10 +104,10 @@ /// The time-to-live on multicast scouting packets ttl: 1, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - /// Accepts a single value (e.g. 
autoconnect: "router|peer") - /// or different values for router, peer and client (e.g. autoconnect: { router: "", peer: "router|peer" }). + /// Accepts a single value (e.g. autoconnect: ["router", "peer"]) + /// or different values for router, peer and client (e.g. autoconnect: { router: [], peer: ["router", "peer"] }). /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + autoconnect: { router: [], peer: ["router", "peer"] }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: true, }, @@ -122,10 +122,10 @@ /// direct connectivity with each other. multihop: false, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - /// Accepts a single value (e.g. autoconnect: "router|peer") - /// or different values for router, peer and client (e.g. autoconnect: { router: "", peer: "router|peer" }). + /// Accepts a single value (e.g. autoconnect: ["router", "peer"]) + /// or different values for router, peer and client (e.g. autoconnect: { router: [], peer: ["router", "peer"] }). /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + autoconnect: { router: [], peer: ["router", "peer"] }, }, }, @@ -208,7 +208,7 @@ // "interfaces": [ // "lo0" // ], - // /// Subjects can be cert_common_names when using TLS or Quic + // /// Subjects can be cert_common_names when using TLS or Quic // "cert_common_names": [ // "example.zenoh.io" // ], @@ -238,7 +238,7 @@ /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. 
- /// NOTE: LowLatency transport does not support the fragmentation, so the message size should be + /// NOTE: LowLatency transport does not support the fragmentation, so the message size should be /// smaller than the tx batch_size. lowlatency: false, /// Enables QoS on unicast communications. @@ -317,7 +317,7 @@ /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. congestion_control: { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. - wait_before_drop: 1000 + wait_before_drop: 1000, }, /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. /// Higher values lead to a more aggressive batching but it will introduce additional latency. @@ -539,5 +539,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, - } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index e239ac8b7a..fec1a1cf8d 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -197,15 +197,6 @@ fn config_keys() { dbg!(c.keys()); } -fn treat_error_as_none<'a, T, D>(deserializer: D) -> Result, D::Error> -where - T: serde::de::Deserialize<'a>, - D: serde::de::Deserializer<'a>, -{ - let value: Value = serde::de::Deserialize::deserialize(deserializer)?; - Ok(T::deserialize(value).ok()) -} - validated_struct::validator! { /// The main configuration structure for Zenoh. /// @@ -264,7 +255,6 @@ validated_struct::validator! { /// The time-to-live on multicast scouting packets. (default: 1) pub ttl: Option, /// Which type of Zenoh instances to automatically establish sessions with upon discovery through UDP multicast. - #[serde(deserialize_with = "treat_error_as_none")] autoconnect: Option>, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: Option>, @@ -281,7 +271,6 @@ validated_struct::validator! 
{ /// direct connectivity with each other. multihop: Option, /// Which type of Zenoh instances to automatically establish sessions with upon discovery through gossip. - #[serde(deserialize_with = "treat_error_as_none")] autoconnect: Option>, }, }, @@ -571,7 +560,7 @@ fn config_deser() { scouting: { multicast: { enabled: false, - autoconnect: "peer|router" + autoconnect: ["peer", "router"] } } }"#, @@ -598,7 +587,7 @@ fn config_deser() { scouting: { multicast: { enabled: false, - autoconnect: {router: "", peer: "peer|router"} + autoconnect: {router: [], peer: ["peer", "router"]} } } }"#, diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 6a06f967ba..6576161473 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -255,12 +255,12 @@ impl<'a> serde::Deserialize<'a> for ModeDependentValue { formatter.write_str("WhatAmIMatcher or mode dependent WhatAmIMatcher") } - fn visit_str(self, value: &str) -> Result + fn visit_seq(self, seq: A) -> Result where - E: de::Error, + A: de::SeqAccess<'de>, { WhatAmIMatcherVisitor {} - .visit_str(value) + .visit_seq(seq) .map(ModeDependentValue::Unique) } diff --git a/commons/zenoh-protocol/src/core/whatami.rs b/commons/zenoh-protocol/src/core/whatami.rs index 9eb9628e3f..9ae0690382 100644 --- a/commons/zenoh-protocol/src/core/whatami.rs +++ b/commons/zenoh-protocol/src/core/whatami.rs @@ -146,6 +146,7 @@ impl WhatAmIMatcher { Self::U8_R_C => formatcp!("{}|{}", WhatAmI::STR_R, WhatAmI::STR_C), Self::U8_P_C => formatcp!("{}|{}", WhatAmI::STR_P, WhatAmI::STR_C), Self::U8_R_P_C => formatcp!("{}|{}|{}", WhatAmI::STR_R, WhatAmI::STR_P, WhatAmI::STR_C), + _ => unreachable!(), } } @@ -329,41 +330,40 @@ impl<'de> serde::de::Visitor<'de> for WhatAmIMatcherVisitor { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!( formatter, - "a | separated list of whatami variants ('{}', '{}', '{}')", + "a list of whatami 
variants ('{}', '{}', '{}')", WhatAmI::STR_R, WhatAmI::STR_P, WhatAmI::STR_C ) } - fn visit_str(self, v: &str) -> Result + fn visit_seq(self, mut seq: A) -> Result where - E: serde::de::Error, + A: serde::de::SeqAccess<'de>, { - v.parse().map_err(|_| { - serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &formatcp!( - "a | separated list of whatami variants ('{}', '{}', '{}')", - WhatAmI::STR_R, - WhatAmI::STR_P, - WhatAmI::STR_C - ), - ) - }) - } + let mut inner = 0; - fn visit_borrowed_str(self, v: &'de str) -> Result - where - E: serde::de::Error, - { - self.visit_str(v) - } - fn visit_string(self, v: String) -> Result - where - E: serde::de::Error, - { - self.visit_str(&v) + while let Some(s) = seq.next_element::()? { + match s.as_str() { + WhatAmI::STR_R => inner |= WhatAmI::U8_R, + WhatAmI::STR_P => inner |= WhatAmI::U8_P, + WhatAmI::STR_C => inner |= WhatAmI::U8_C, + _ => { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(&s), + &formatcp!( + "one of ('{}', '{}', '{}')", + WhatAmI::STR_R, + WhatAmI::STR_P, + WhatAmI::STR_C + ), + )) + } + } + } + + Ok(WhatAmIMatcher::try_from(inner) + .expect("`WhatAmIMatcher` should be valid by construction")) } } @@ -372,6 +372,6 @@ impl<'de> serde::Deserialize<'de> for WhatAmIMatcher { where D: serde::Deserializer<'de>, { - deserializer.deserialize_str(WhatAmIMatcherVisitor) + deserializer.deserialize_seq(WhatAmIMatcherVisitor) } } diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index f65c939533..07971b7853 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use std::{ - str::FromStr, sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -540,9 +539,7 @@ async fn static_failover_brokering() -> Result<()> { config .scouting .gossip - .set_autoconnect(Some(ModeDependentValue::Unique( - WhatAmIMatcher::from_str("").unwrap(), - ))) + .set_autoconnect(Some(ModeDependentValue::Unique(WhatAmIMatcher::empty()))) 
.unwrap(); Some(config) }; From 2e6edcaec488f074d474616c428b94ff2cee0a6e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 19 Jul 2024 10:45:33 +0200 Subject: [PATCH 539/598] Log to debug SHM error (#1246) * Log to debug SHM error * Fix clippy --- io/zenoh-transport/src/multicast/rx.rs | 4 +--- io/zenoh-transport/src/unicast/lowlatency/rx.rs | 4 +--- io/zenoh-transport/src/unicast/universal/rx.rs | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index d8a6aaeb02..93dc3c727a 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -13,8 +13,6 @@ // use std::sync::MutexGuard; -#[cfg(feature = "shared-memory")] -use tracing::error; use zenoh_core::{zlock, zread}; use zenoh_protocol::{ core::{Locator, Priority, Reliability}, @@ -47,7 +45,7 @@ impl TransportMulticastInner { { if self.manager.config.multicast.is_shm { if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { - error!("Error receiving SHM buffer: {e}"); + tracing::debug!("Error receiving SHM buffer: {e}"); return Ok(()); } } diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index 0484a4a028..3dd499000d 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -11,8 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use tracing::error; use zenoh_buffers::{ reader::{HasReader, Reader}, ZSlice, @@ -40,7 +38,7 @@ impl TransportUnicastLowlatency { { if self.config.shm.is_some() { if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { - error!("Error receiving SHM buffer: {e}"); + tracing::debug!("Error receiving SHM buffer: {e}"); return Ok(()); } } diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs 
index 71e674bdb8..afd8e114d7 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -13,8 +13,6 @@ // use std::sync::MutexGuard; -#[cfg(feature = "shared-memory")] -use tracing::error; use zenoh_core::{zlock, zread}; use zenoh_link::Link; use zenoh_protocol::{ @@ -48,7 +46,7 @@ impl TransportUnicastUniversal { { if self.config.shm.is_some() { if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { - error!("Error receiving SHM buffer: {e}"); + tracing::debug!("Error receiving SHM buffer: {e}"); return Ok(()); } } From 81eacc58816a573534fad241100ba79be9a1382b Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 19 Jul 2024 14:44:10 +0200 Subject: [PATCH 540/598] Admin keys remapping (#1209) * Change admin keys to @//** * Improve storage manager plugin * Remove ke_for_sure macro --------- Co-authored-by: Luca Cominardi --- Cargo.lock | 1 + DEFAULT_CONFIG.json5 | 2 +- README.md | 10 ++-- plugins/zenoh-plugin-rest/src/lib.rs | 8 ++-- .../zenoh-plugin-storage-manager/Cargo.toml | 1 + .../zenoh-plugin-storage-manager/src/lib.rs | 3 +- .../src/replica/mod.rs | 31 +++++------- .../src/storages_mgt.rs | 2 +- zenoh/src/api/admin.rs | 34 ++++++------- zenoh/src/api/plugins.rs | 6 +-- zenoh/src/net/runtime/adminspace.rs | 48 +++++++++---------- zenoh/tests/events.rs | 20 ++++---- 12 files changed, 83 insertions(+), 83 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44002406fa..afed03cf2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5903,6 +5903,7 @@ dependencies = [ "futures", "git-version", "jsonschema", + "lazy_static", "libloading", "rustc_version 0.4.0", "schemars", diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index bf9fdd3f22..f6e10f77ca 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -11,7 +11,7 @@ /// The node's mode (router, peer or client) mode: "peer", - /// The node's metadata (name, location, DNS name, etc.) 
Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenoh and available in admin space @//router, @//peer or @//client metadata: { name: "strawberry", location: "Penny Lane", diff --git a/README.md b/README.md index 9a6216d3ef..05d3233139 100644 --- a/README.md +++ b/README.md @@ -126,15 +126,15 @@ Zenoh's router is built as `target/release/zenohd`. All the examples are built i * run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage: `./target/release/zenohd --adminspace-permissions=rw --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` * in another shell, get info of the zenoh router via the zenoh admin space: - `curl http://localhost:8000/@/router/local` + `curl http://localhost:8000/@/local/router` * get the volumes of the router (only memory by default): - `curl 'http://localhost:8000/@/router/local/**/volumes/*'` + `curl 'http://localhost:8000/@/local/router/**/volumes/*'` * get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present): - `curl 'http://localhost:8000/@/router/local/**/storages/*'` + `curl 'http://localhost:8000/@/local/router/**/storages/*'` * add another memory storage on `/demo/mystore/**`: - `curl -X PUT -H 'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/router/local/config/plugins/storage_manager/storages/mystore` + `curl -X PUT -H 'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/local/router/config/plugins/storage_manager/storages/mystore` * check it has been created: - `curl 'http://localhost:8000/@/router/local/**/storages/*'` + `curl 'http://localhost:8000/@/local/router/**/storages/*'` ### Configuration options diff --git 
a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index d6db7c74cb..7ef21ace7c 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -515,10 +515,10 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { fn path_to_key_expr<'a>(path: &'a str, zid: &str) -> ZResult> { let path = path.strip_prefix('/').unwrap_or(path); - if path == "@/router/local" { - KeyExpr::try_from(format!("@/router/{zid}")) - } else if let Some(suffix) = path.strip_prefix("@/router/local/") { - KeyExpr::try_from(format!("@/router/{zid}/{suffix}")) + if path == "@/local" { + KeyExpr::try_from(format!("@/{zid}")) + } else if let Some(suffix) = path.strip_prefix("@/local/") { + KeyExpr::try_from(format!("@/{zid}/{suffix}")) } else { KeyExpr::try_from(path) } diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index a6694108db..9ef1846d72 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -40,6 +40,7 @@ derive-new = { workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } +lazy_static = { workspace = true } libloading = { workspace = true } tracing = { workspace = true } serde = { workspace = true, features = ["default"] } diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 7399d3e507..3c64e3fe35 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -94,8 +94,9 @@ struct StorageRuntimeInner { impl StorageRuntimeInner { fn status_key(&self) -> String { format!( - "@/router/{}/status/plugins/{}", + "@/{}/{}/status/plugins/{}", &self.runtime.zid(), + &self.runtime.whatami().to_str(), &self.name ) } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 014fdc697e..930b4511a2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -16,7 +16,7 @@ use std::{ collections::{HashMap, HashSet}, - str::{self, FromStr}, + str, time::{Duration, SystemTime}, }; @@ -26,8 +26,7 @@ use async_std::{ }; use flume::{Receiver, Sender}; use futures::{pin_mut, select, FutureExt}; -use urlencoding::encode; -use zenoh::prelude::*; +use zenoh::{key_expr::keyexpr, prelude::*}; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; @@ -50,10 +49,12 @@ const INTERVALS: &str = "intervals"; const SUBINTERVALS: &str = "subintervals"; const CONTENTS: &str = "contents"; pub const EPOCH_START: SystemTime = SystemTime::UNIX_EPOCH; - -pub const ALIGN_PREFIX: &str = "@-digest"; pub const SUBINTERVAL_CHUNKS: usize = 10; +lazy_static::lazy_static!( + static ref KE_PREFIX_DIGEST: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("@-digest") }; +); + // A replica consists of a storage service and services required for anti-entropy // To perform anti-entropy, we need a `Digest` that contains the state of the datastore // `Snapshotter` computes the `Digest` and maintains all related information @@ -135,7 +136,7 @@ impl Replica { // digest sub let digest_sub = replica.start_digest_sub(tx_digest).fuse(); // queryable for alignment - let digest_key = Replica::get_digest_key(&replica.key_expr, ALIGN_PREFIX); + let digest_key = Replica::get_digest_key(&replica.key_expr); let align_q = AlignQueryable::start_align_queryable( replica.session.clone(), digest_key.clone(), @@ -199,9 +200,7 @@ impl Replica { pub async fn start_digest_sub(&self, tx: Sender<(String, Digest)>) { let mut received = HashMap::::new(); - let digest_key = Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX) - .join("**") - .unwrap(); + let digest_key = 
Replica::get_digest_key(&self.key_expr).join("**").unwrap(); tracing::debug!( "[DIGEST_SUB] Declaring Subscriber named {} on '{}'", @@ -222,8 +221,8 @@ impl Replica { continue; } }; - let from = &sample.key_expr().as_str() - [Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; + let from = + &sample.key_expr().as_str()[Replica::get_digest_key(&self.key_expr).len() + 1..]; let digest: Digest = match serde_json::from_reader(sample.payload().reader()) { Ok(digest) => digest, @@ -267,7 +266,7 @@ impl Replica { // Create a publisher to periodically publish digests from the snapshotter // Publish on // pub async fn start_digest_pub(&self, snapshotter: Arc) { - let digest_key = Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX) + let digest_key = Replica::get_digest_key(&self.key_expr) .join(&self.name) .unwrap(); @@ -333,12 +332,8 @@ impl Replica { true } - fn get_digest_key(key_expr: &OwnedKeyExpr, align_prefix: &str) -> OwnedKeyExpr { - let key_expr = encode(key_expr).to_string(); - OwnedKeyExpr::from_str(align_prefix) - .unwrap() - .join(&key_expr) - .unwrap() + fn get_digest_key(key_expr: &keyexpr) -> OwnedKeyExpr { + *KE_PREFIX_DIGEST / key_expr } pub fn get_hot_interval_number(publication_interval: Duration, delta: Duration) -> usize { diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 27dbaf58f6..4ca39cb093 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -28,7 +28,7 @@ pub(crate) async fn start_storage( admin_key: String, zenoh: Arc, ) -> ZResult> { - // Ex: @/router/390CEC11A1E34977A1C609A35BC015E6/status/plugins/storage_manager/storages/demo1 -> 390CEC11A1E34977A1C609A35BC015E6/demo1 (/ needed????) + // Ex: @/390CEC11A1E34977A1C609A35BC015E6/router/status/plugins/storage_manager/storages/demo1 -> 390CEC11A1E34977A1C609A35BC015E6/demo1 (/ needed????) 
let parts: Vec<&str> = admin_key.split('/').collect(); let uuid = parts[2]; let storage_name = parts[7]; diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 9f2e073f75..e794c87db5 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -34,22 +34,17 @@ use super::{ subscriber::SubscriberKind, }; -macro_rules! ke_for_sure { - ($val:expr) => { - unsafe { keyexpr::from_str_unchecked($val) } - }; -} - lazy_static::lazy_static!( - static ref KE_STARSTAR: &'static keyexpr = ke_for_sure!("**"); - static ref KE_PREFIX: &'static keyexpr = ke_for_sure!("@/session"); - static ref KE_TRANSPORT_UNICAST: &'static keyexpr = ke_for_sure!("transport/unicast"); - static ref KE_LINK: &'static keyexpr = ke_for_sure!("link"); + static ref KE_STARSTAR: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("**") }; + static ref KE_PREFIX: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("@") }; + static ref KE_SESSION: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("session") }; + static ref KE_TRANSPORT_UNICAST: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("transport/unicast") }; + static ref KE_LINK: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("link") }; ); pub(crate) fn init(session: &Session) { if let Ok(own_zid) = keyexpr::new(&session.zid().to_string()) { - let admin_key = KeyExpr::from(*KE_PREFIX / own_zid / *KE_STARSTAR) + let admin_key = KeyExpr::from(*KE_PREFIX / own_zid / *KE_SESSION / *KE_STARSTAR) .to_wire(session) .to_owned(); @@ -69,7 +64,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { fn reply_peer(own_zid: &keyexpr, query: &Query, peer: TransportPeer) { let zid = peer.zid.to_string(); if let Ok(zid) = keyexpr::new(&zid) { - let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid; + let key_expr = *KE_PREFIX / own_zid / *KE_SESSION / *KE_TRANSPORT_UNICAST / zid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { match 
ZBytes::try_from(value) { @@ -85,8 +80,13 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { let mut s = DefaultHasher::new(); link.hash(&mut s); if let Ok(lid) = keyexpr::new(&s.finish().to_string()) { - let key_expr = - *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid; + let key_expr = *KE_PREFIX + / own_zid + / *KE_SESSION + / *KE_TRANSPORT_UNICAST + / zid + / *KE_LINK + / lid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(link) { match ZBytes::try_from(value) { @@ -157,8 +157,10 @@ impl TransportMulticastEventHandler for Handler { ) -> ZResult> { if let Ok(own_zid) = keyexpr::new(&self.session.zid().to_string()) { if let Ok(zid) = keyexpr::new(&peer.zid.to_string()) { - let expr = WireExpr::from(&(*KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid)) - .to_owned(); + let expr = WireExpr::from( + &(*KE_PREFIX / own_zid / *KE_SESSION / *KE_TRANSPORT_UNICAST / zid), + ) + .to_owned(); let info = DataInfo { encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index cfa53edc44..2f51f78a85 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -94,12 +94,12 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// Thus the plugin can reply its contribution to the global admin space of this zenohd. /// Parameters: /// * `key_expr`: the key_expr selector of the query. This key_expr is - /// exactly the same as it was requested by user, for example "@/router/ROUTER_ID/plugins/PLUGIN_NAME/some/plugin/info" or "@/router/*/plugins/*/foo/bar". + /// exactly the same as it was requested by user, for example "@/ROUTER_ID/router/plugins/PLUGIN_NAME/some/plugin/info" or "@/*/router/plugins/*/foo/bar". 
/// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the key_expr matches the `plugin_status_key` - /// * `plugin_status_key`: the actual path to plugin's status in the admin space. For example "@/router/ROUTER_ID/plugins/PLUGIN_NAME" + /// * `plugin_status_key`: the actual path to plugin's status in the admin space. For example "@/ROUTER_ID/router/plugins/PLUGIN_NAME" /// Returns value: /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" - /// and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" + /// and it's requested with the query "@/ROUTER_ID/router/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" /// as they doesn't match the query. /// * `Err(ZError)`: Problem occurred when processing the query. 
/// diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index e2dad5c844..d3e96b650f 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -152,19 +152,19 @@ impl AdminSpace { let zid_str = runtime.state.zid.to_string(); let whatami_str = runtime.state.whatami.to_str(); let mut config = runtime.config().lock(); - let root_key: OwnedKeyExpr = format!("@/{whatami_str}/{zid_str}").try_into().unwrap(); + let root_key: OwnedKeyExpr = format!("@/{zid_str}/{whatami_str}").try_into().unwrap(); let mut handlers: HashMap<_, Handler> = HashMap::new(); handlers.insert(root_key.clone(), Arc::new(local_data)); handlers.insert( - format!("@/{whatami_str}/{zid_str}/metrics") + format!("@/{zid_str}/{whatami_str}/metrics") .try_into() .unwrap(), Arc::new(metrics), ); if runtime.state.whatami == WhatAmI::Router { handlers.insert( - format!("@/{whatami_str}/{zid_str}/linkstate/routers") + format!("@/{zid_str}/{whatami_str}/linkstate/routers") .try_into() .unwrap(), Arc::new(routers_linkstate_data), @@ -174,20 +174,20 @@ impl AdminSpace { && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate" { handlers.insert( - format!("@/{whatami_str}/{zid_str}/linkstate/peers") + format!("@/{zid_str}/{whatami_str}/linkstate/peers") .try_into() .unwrap(), Arc::new(peers_linkstate_data), ); } handlers.insert( - format!("@/{whatami_str}/{zid_str}/subscriber/**") + format!("@/{zid_str}/{whatami_str}/subscriber/**") .try_into() .unwrap(), Arc::new(subscribers_data), ); handlers.insert( - format!("@/{whatami_str}/{zid_str}/queryable/**") + format!("@/{zid_str}/{whatami_str}/queryable/**") .try_into() .unwrap(), Arc::new(queryables_data), @@ -195,7 +195,7 @@ impl AdminSpace { #[cfg(feature = "plugins")] handlers.insert( - format!("@/{whatami_str}/{zid_str}/plugins/**") + format!("@/{zid_str}/{whatami_str}/plugins/**") .try_into() .unwrap(), Arc::new(plugins_data), @@ -203,7 +203,7 @@ impl AdminSpace { #[cfg(feature = 
"plugins")] handlers.insert( - format!("@/{whatami_str}/{zid_str}/status/plugins/**") + format!("@/{zid_str}/{whatami_str}/status/plugins/**") .try_into() .unwrap(), Arc::new(plugins_status), @@ -381,24 +381,24 @@ impl Primitives for AdminSpace { if let Some(key) = msg.wire_expr.as_str().strip_prefix(&format!( "@/{}/{}/config/", - self.context.runtime.state.whatami, self.context.runtime.state.zid + self.context.runtime.state.zid, self.context.runtime.state.whatami, )) { match msg.payload { PushBody::Put(put) => match std::str::from_utf8(&put.payload.contiguous()) { Ok(json) => { tracing::trace!( - "Insert conf value /@/{}/{}/config/{} : {}", - self.context.runtime.state.whatami, + "Insert conf value @/{}/{}/config/{} : {}", self.context.runtime.state.zid, + self.context.runtime.state.whatami, key, json ); if let Err(e) = (&self.context.runtime.state.config).insert_json5(key, json) { error!( - "Error inserting conf value /@/{}/{}/config/{} : {} - {}", - self.context.runtime.state.whatami, + "Error inserting conf value @/{}/{}/config/{} : {} - {}", self.context.runtime.state.zid, + self.context.runtime.state.whatami, key, json, e @@ -406,15 +406,15 @@ impl Primitives for AdminSpace { } } Err(e) => error!( - "Received non utf8 conf value on /@/{}/{}/config/{} : {}", - self.context.runtime.state.whatami, self.context.runtime.state.zid, key, e + "Received non utf8 conf value on @/{}/{}/config/{} : {}", + self.context.runtime.state.zid, self.context.runtime.state.whatami, key, e ), }, PushBody::Del(_) => { tracing::trace!( "Deleting conf value /@/{}/{}/config/{}", - self.context.runtime.state.whatami, self.context.runtime.state.zid, + self.context.runtime.state.whatami, key ); if let Err(e) = self.context.runtime.state.config.remove(key) { @@ -534,7 +534,7 @@ impl crate::net::primitives::EPrimitives for AdminSpace { fn local_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}", - context.runtime.state.whatami, 
context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -638,7 +638,7 @@ fn local_data(context: &AdminContext, query: Query) { fn metrics(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}/metrics", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -673,7 +673,7 @@ zenoh_build{{version="{}"}} 1 fn routers_linkstate_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}/linkstate/routers", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -692,7 +692,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { fn peers_linkstate_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}/linkstate/peers", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -713,8 +713,8 @@ fn subscribers_data(context: &AdminContext, query: Query) { for sub in tables.hat_code.get_subscriptions(&tables) { let key = KeyExpr::try_from(format!( "@/{}/{}/subscriber/{}", - context.runtime.state.whatami, context.runtime.state.zid, + context.runtime.state.whatami, sub.0.expr() )) .unwrap(); @@ -737,8 +737,8 @@ fn queryables_data(context: &AdminContext, query: Query) { for qabl in tables.hat_code.get_queryables(&tables) { let key = KeyExpr::try_from(format!( "@/{}/{}/queryable/{}", - context.runtime.state.whatami, context.runtime.state.zid, + context.runtime.state.whatami, qabl.0.expr() )) .unwrap(); @@ -761,7 +761,7 @@ fn plugins_data(context: &AdminContext, query: Query) { let guard = context.runtime.plugins_manager(); let root_key = format!( "@/{}/{}/plugins", - context.runtime.state.whatami, &context.runtime.state.zid + 
&context.runtime.state.zid, context.runtime.state.whatami ); let root_key = unsafe { keyexpr::from_str_unchecked(&root_key) }; tracing::debug!("requested plugins status {:?}", query.key_expr()); @@ -793,7 +793,7 @@ fn plugins_status(context: &AdminContext, query: Query) { let guard = context.runtime.plugins_manager(); let mut root_key = format!( "@/{}/{}/status/plugins/", - context.runtime.state.whatami, &context.runtime.state.zid + &context.runtime.state.zid, context.runtime.state.whatami ); for plugin in guard.started_plugins_iter() { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index c6931f1c2c..11a6e18b53 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -56,10 +56,10 @@ async fn zenoh_events() { let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; let zid = session.zid(); let sub1 = - ztimeout!(session.declare_subscriber(format!("@/session/{zid}/transport/unicast/*"))) + ztimeout!(session.declare_subscriber(format!("@/{zid}/session/transport/unicast/*"))) .unwrap(); let sub2 = ztimeout!( - session.declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) + session.declare_subscriber(format!("@/{zid}/session/transport/unicast/*/link/*")) ) .unwrap(); @@ -69,47 +69,47 @@ async fn zenoh_events() { let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); let key_expr = sample.as_ref().unwrap().key_expr().as_str(); - assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); + assert!(key_expr.eq(&format!("@/{zid}/session/transport/unicast/{zid2}"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); let key_expr = sample.as_ref().unwrap().key_expr().as_str(); - assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); + assert!(key_expr.starts_with(&format!("@/{zid}/session/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); 
let replies: Vec = - ztimeout!(session.get(format!("@/session/{zid}/transport/unicast/*"))) + ztimeout!(session.get(format!("@/{zid}/session/transport/unicast/*"))) .unwrap() .into_iter() .collect(); assert!(replies.len() == 1); assert!(replies[0].result().is_ok()); let key_expr = replies[0].result().unwrap().key_expr().as_str(); - assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); + assert!(key_expr.eq(&format!("@/{zid}/session/transport/unicast/{zid2}"))); let replies: Vec = - ztimeout!(session.get(format!("@/session/{zid}/transport/unicast/*/link/*"))) + ztimeout!(session.get(format!("@/{zid}/session/transport/unicast/*/link/*"))) .unwrap() .into_iter() .collect(); assert!(replies.len() == 1); assert!(replies[0].result().is_ok()); let key_expr = replies[0].result().unwrap().key_expr().as_str(); - assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); + assert!(key_expr.starts_with(&format!("@/{zid}/session/transport/unicast/{zid2}/link/"))); close_session(session2).await; let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); let key_expr = sample.as_ref().unwrap().key_expr().as_str(); - assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); + assert!(key_expr.eq(&format!("@/{zid}/session/transport/unicast/{zid2}"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); let key_expr = sample.as_ref().unwrap().key_expr().as_str(); - assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); + assert!(key_expr.starts_with(&format!("@/{zid}/session/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); ztimeout!(sub2.undeclare()).unwrap(); From b2f88ae9e8d7776a68f74bfc642ae16994af44bf Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Mon, 22 Jul 2024 15:26:53 +0300 
Subject: [PATCH 541/598] Do not require mandatory "unstable" for "internal" feature (#1250) --- zenoh/src/lib.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 024c1303af..86fb9e918f 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -364,10 +364,6 @@ pub mod config { pub use zenoh_config::*; } -#[cfg(all(feature = "internal", not(feature = "unstable")))] -compile_error!( - "All internal functionality is unstable. The `unstable` feature must be enabled to use `internal`." -); #[cfg(all( feature = "plugins", not(all(feature = "unstable", feature = "internal")) From c58fcdc67b0e5d0454ba09fe7f86ae4ded851e66 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Mon, 22 Jul 2024 17:59:44 +0300 Subject: [PATCH 542/598] Fix unstable marker atachment (#1251) * attachment API for Sample is not unstable * fix SampleBuilderTrait import * fix clippy * fix clippy --- zenoh/src/api/query.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index f1807333c7..2a1016db5f 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -27,11 +27,6 @@ use zenoh_protocol::core::ZenohIdProto; use zenoh_protocol::core::{CongestionControl, Parameters}; use zenoh_result::ZResult; -#[cfg(feature = "unstable")] -use super::{ - builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo, - selector::ZenohParameters, -}; use super::{ builders::sample::{EncodingBuilderTrait, QoSBuilderTrait}, bytes::ZBytes, @@ -44,6 +39,9 @@ use super::{ session::Session, value::Value, }; +#[cfg(feature = "unstable")] +use super::{sample::SourceInfo, selector::ZenohParameters}; +use crate::{bytes::OptionZBytes, sample::SampleBuilderTrait}; /// The [`Queryable`](crate::query::Queryable)s that should be target of a [`get`](Session::get). 
pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; @@ -211,9 +209,8 @@ pub struct SessionGetBuilder<'a, 'b, Handler> { pub(crate) source_info: SourceInfo, } -#[zenoh_macros::unstable] impl SampleBuilderTrait for SessionGetBuilder<'_, '_, Handler> { - #[cfg(feature = "unstable")] + #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, From 9408f744c3ba8504c88e5e9b427ccf9173c35551 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Tue, 23 Jul 2024 14:10:40 +0200 Subject: [PATCH 543/598] fix: fix canonization not updating `&mut str` length (#1254) --- commons/zenoh-keyexpr/src/key_expr/canon.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index 8187467004..a8950b6d0c 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -98,6 +98,7 @@ impl Canonize for &mut str { let bytes = unsafe { self.as_bytes_mut() }; let length = canonize(bytes); bytes[length..].fill(b'\0'); + *self = &mut core::mem::take(self)[..length]; } } @@ -166,6 +167,8 @@ fn canonizer() { // &mut str remaining part is zeroed let mut s = String::from("$*$*$*/hello/$*$*/bye/$*$*"); - s.as_mut_str().canonize(); + let mut s_mut = s.as_mut_str(); + s_mut.canonize(); + assert_eq!(s_mut, "*/hello/*/bye/*"); assert_eq!(s, "*/hello/*/bye/*\0\0\0\0\0\0\0\0\0\0\0"); } From cf4d3d37d2480490e1d1f63721e84baf6c203ca6 Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Tue, 23 Jul 2024 23:42:53 +0800 Subject: [PATCH 544/598] Support bytes::Bytes for ZBytes (#1248) * Support bytes::Bytes for ZBytes. 
Signed-off-by: ChenYing Kuo * Avoid Bytes copy in ZBytes serialization --------- Signed-off-by: ChenYing Kuo Co-authored-by: Luca Cominardi --- Cargo.lock | 5 +-- Cargo.toml | 1 + zenoh/Cargo.toml | 1 + zenoh/src/api/bytes.rs | 81 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 85 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afed03cf2e..d9eae88f3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -648,9 +648,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" [[package]] name = "cache-padded" @@ -5382,6 +5382,7 @@ dependencies = [ "ahash", "async-trait", "base64 0.22.1", + "bytes", "event-listener 5.3.1", "flume", "form_urlencoded", diff --git a/Cargo.toml b/Cargo.toml index a3a370971b..b1d5f4bf37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ async-std = { version = "=1.12.0", default-features = false } # Default features async-trait = "0.1.60" base64 = "0.22.1" bincode = "1.3.3" +bytes = "1.6.1" clap = { version = "4.4.11", features = ["derive"] } console-subscriber = "0.3.0" const_format = "0.2.30" diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 7961c787eb..d0ac151c01 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -71,6 +71,7 @@ tokio-util = { workspace = true } ahash = { workspace = true } async-trait = { workspace = true } base64 = { workspace = true } +bytes = { workspace = true } event-listener = { workspace = true } flume = { workspace = true } form_urlencoded = { workspace = true } diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 572ac16cab..1a0935f846 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -24,7 +24,7 @@ use zenoh_buffers::{ 
buffer::{Buffer, SplitBuffer}, reader::{DidntRead, HasReader, Reader}, writer::HasWriter, - ZBuf, ZBufReader, ZBufWriter, ZSlice, + ZBuf, ZBufReader, ZBufWriter, ZSlice, ZSliceBuffer, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ @@ -2102,6 +2102,81 @@ impl TryFrom<&mut ZBytes> for serde_pickle::Value { } } +// bytes::Bytes + +// Define a transparent wrapper type to get around Rust's orphan rule. +// This allows to use bytes::Bytes directly as supporting buffer of a +// ZSlice resulting in zero-copy and zero-alloc bytes::Bytes serialization. +#[repr(transparent)] +#[derive(Debug)] +struct BytesWrap(bytes::Bytes); + +impl ZSliceBuffer for BytesWrap { + fn as_slice(&self) -> &[u8] { + &self.0 + } + + fn as_any(&self) -> &dyn std::any::Any { + self + } + + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } +} + +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: bytes::Bytes) -> Self::Output { + ZBytes::new(BytesWrap(s)) + } +} + +impl From for ZBytes { + fn from(t: bytes::Bytes) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = Infallible; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + // bytes::Bytes can be constructed only by passing ownership to the constructor. + // Thereofore, here we are forced to allocate a vector and copy the whole ZBytes + // content since bytes::Bytes does not support anything else than Box (and its + // variants like Vec and String). 
+ let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); + Ok(bytes::Bytes::from(v)) + } +} + +impl TryFrom for bytes::Bytes { + type Error = Infallible; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for bytes::Bytes { + type Error = Infallible; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for bytes::Bytes { + type Error = Infallible; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Shared memory conversion #[cfg(feature = "shared-memory")] impl Serialize for ZSerde { @@ -3168,6 +3243,10 @@ mod tests { serialize_deserialize!(Parameters, Parameters::from("")); serialize_deserialize!(Parameters, Parameters::from("a=1;b=2;c3")); + // Bytes + serialize_deserialize!(bytes::Bytes, bytes::Bytes::from(vec![1, 2, 3, 4])); + serialize_deserialize!(bytes::Bytes, bytes::Bytes::from("Hello World")); + // Tuple serialize_deserialize!((usize, usize), (0, 1)); serialize_deserialize!((usize, String), (0, String::from("a"))); From cb9fc8a59f7e3f92d7396c773935eb59743bc178 Mon Sep 17 00:00:00 2001 From: oteffahi <70609372+oteffahi@users.noreply.github.com> Date: Tue, 23 Jul 2024 17:58:51 +0200 Subject: [PATCH 545/598] Update ACL config format to support AND/OR logic between subjects (#1200) * Add trie dependency * Start replacing subject HashMap with TrieMap * Complete SubjectTrieMap implementation * Add new ACL config schema to zenoh config * Add new ACL config parsing logic * Fix empty subject lists edge-case for cartesian product of subjects * Format code, apply clippy suggestions * Fix edge-case where a subject-combination is repeated in config * Update new transport ACL logic with subject-combinations support * Make ACL config lists mandatory when ACL config is enabled * Update ACL tests * Update authentication tests * Break ACL and authentication test into multiple tests that can run concurrently * Fix entry_id value 
in error message * Add policy entry rules/subjects id validation * Update DEFAULT_CONFIG * Fix missing port number in test client config * Add ACL subject combination tests * Empty commit to trigger CI * Fix unsoundness in `SubjectMap` This replaces the trie data structure with a vector and allows querying any combination of subject properties in any order. Moreover, undefined subject properties are now always interpreted as wildcards. * Address review comments from original pull request * Fix typos * Fix clippy errors * Minor edits * Check for empty subject attributes * Rename ACL config field actions to messages * Rename ACL config field policy to policies * Update DEFAULT_CONFIG * Update ACL tests config * Add warning when applying ACL on transport with multiple interfaces * Improve ACL subject logs * Improve ACL no matching subject log * Separate empty ACL config logs * Replace unwrap with expect * Fix unmodified copy/pasted code * Rename ACL config message 'get' to 'query' * Rename ACL 'get' to 'query' in DEFAULT_CONFIG * Rename 'get' to 'query' in tests --------- Co-authored-by: Mahmoud Mazouz --- Cargo.lock | 18 +- Cargo.toml | 1 + DEFAULT_CONFIG.json5 | 77 +- commons/zenoh-config/src/defaults.rs | 2 + commons/zenoh-config/src/lib.rs | 70 +- zenoh/Cargo.toml | 1 + .../net/routing/interceptor/access_control.rs | 190 +++-- .../net/routing/interceptor/authorization.rs | 511 +++++++++---- zenoh/tests/acl.rs | 345 +++++---- zenoh/tests/authentication.rs | 679 ++++++++++++++++-- 10 files changed, 1422 insertions(+), 472 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9eae88f3e..04348c0dde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -942,7 +942,7 @@ dependencies = [ "clap", "criterion-plot", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -963,7 +963,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = 
[ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -2015,6 +2015,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -2992,7 +3001,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -3005,7 +3014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 2.0.52", @@ -5388,6 +5397,7 @@ dependencies = [ "form_urlencoded", "futures", "git-version", + "itertools 0.13.0", "lazy_static", "once_cell", "ordered-float", diff --git a/Cargo.toml b/Cargo.toml index b1d5f4bf37..254cdc19b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,6 +107,7 @@ hmac = { version = "0.12.1", features = ["std"] } home = "0.5.4" http-types = "2.12.0" humantime = "2.1.0" +itertools = "0.13.0" json5 = "0.4.1" jsonschema = { version = "0.18.0", default-features = false } keyed-set = "1.0.0" diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index f6e10f77ca..33c9b3acdd 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -186,27 +186,52 @@ // }, // ], - // /// configure access control (ACL) rules + // /// Configure access control (ACL) rules // access_control: { - // ///[true/false] acl will be activated only if this is set to true + // /// [true/false] acl will be activated only if this is set to true // "enabled": false, - // ///[deny/allow] default permission is deny (even if this is left empty or not specified) + // /// 
[deny/allow] default permission is deny (even if this is left empty or not specified) // "default_permission": "deny", - // ///rule set for permissions allowing or denying access to key-expressions + // /// Rule set for permissions allowing or denying access to key-expressions // "rules": // [ // { - // "actions": [ - // "put", "get", "declare_subscriber", "declare_queryable" + // /// Id has to be unique within the rule set + // "id": "rule1", + // "messages": [ + // "put", "query", "declare_subscriber", "declare_queryable" // ], // "flows":["egress","ingress"], // "permission": "allow", // "key_exprs": [ // "test/demo" // ], + // }, + // { + // "id": "rule2", + // "messages": [ + // "put", "query", "declare_subscriber", "declare_queryable" + // ], + // "flows":["ingress"], + // "permission": "allow", + // "key_exprs": [ + // "**" + // ], + // }, + // ], + // /// List of combinations of subjects. + // /// + // /// If a subject property (i.e. username, certificate common name or interface) is empty + // /// it is interpreted as a wildcard. Moreover, a subject property cannot be an empty list. 
+ // "subjects": + // [ + // { + // /// Id has to be unique within the subjects list + // "id": "subject1", // /// Subjects can be interfaces // "interfaces": [ - // "lo0" + // "lo0", + // "en0", // ], // /// Subjects can be cert_common_names when using TLS or Quic // "cert_common_names": [ @@ -215,9 +240,43 @@ // /// Subjects can be usernames when using user/password authentication // "usernames": [ // "zenoh-example" - // ] + // ], + // /// This instance translates internally to this filter: + // /// (interface="lo0" && cert_common_name="example.zenoh.io" && username="zenoh-example") || + // /// (interface="en0" && cert_common_name="example.zenoh.io" && username="zenoh-example") // }, - // ] + // { + // "id": "subject2", + // "interfaces": [ + // "lo0", + // "en0", + // ], + // "cert_common_names": [ + // "example2.zenoh.io" + // ], + // /// This instance translates internally to this filter: + // /// (interface="lo0" && cert_common_name="example2.zenoh.io") || + // /// (interface="en0" && cert_common_name="example2.zenoh.io") + // }, + // { + // "id": "subject3", + // /// An empty subject combination is a wildcard + // }, + // ], + // /// The policies list associates rules to subjects + // "policies": + // [ + // /// Each policy associates one or multiple rules to one or multiple subject combinations + // { + // /// Rules and Subjects are identified with their unique IDs declared above + // "rules": ["rule1"], + // "subjects": ["subject1", "subject2"], + // }, + // { + // "rules": ["rule2"], + // "subjects": ["subject3"], + // }, + // ] //}, /// Configure internal transport parameters diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index bbb03a7eff..810e0931e2 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -257,6 +257,8 @@ impl Default for AclConfig { enabled: false, default_permission: Permission::Deny, rules: None, + subjects: None, + policies: None, } } } diff --git 
a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index fec1a1cf8d..270cf950c3 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -104,40 +104,70 @@ pub struct DownsamplingItemConf { } #[derive(Serialize, Debug, Deserialize, Clone)] -pub struct AclConfigRules { - pub interfaces: Option>, - pub cert_common_names: Option>, - pub usernames: Option>, +pub struct AclConfigRule { + pub id: String, pub key_exprs: Vec, - pub actions: Vec, + pub messages: Vec, pub flows: Option>, pub permission: Permission, } +#[derive(Serialize, Debug, Deserialize, Clone)] +pub struct AclConfigSubjects { + pub id: String, + pub interfaces: Option>, + pub cert_common_names: Option>, + pub usernames: Option>, +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct Interface(pub String); + +impl std::fmt::Display for Interface { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Interface({})", self.0) + } +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct CertCommonName(pub String); + +impl std::fmt::Display for CertCommonName { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "CertCommonName({})", self.0) + } +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct Username(pub String); + +impl std::fmt::Display for Username { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Username({})", self.0) + } +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct AclConfigPolicyEntry { + pub rules: Vec, + pub subjects: Vec, +} + #[derive(Clone, Serialize, Debug, Deserialize)] pub struct PolicyRule { - pub subject: Subject, + pub subject_id: usize, pub key_expr: String, - pub action: Action, + pub message: AclMessage, pub permission: Permission, pub flow: InterceptorFlow, } -#[derive(Serialize, Debug, Deserialize, Eq, 
PartialEq, Hash, Clone)] -#[serde(untagged)] -#[serde(rename_all = "snake_case")] -pub enum Subject { - Interface(String), - CertCommonName(String), - Username(String), -} - #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "snake_case")] -pub enum Action { +pub enum AclMessage { Put, DeclareSubscriber, - Get, + Query, DeclareQueryable, } @@ -505,7 +535,9 @@ validated_struct::validator! { pub access_control: AclConfig { pub enabled: bool, pub default_permission: Permission, - pub rules: Option> + pub rules: Option>, + pub subjects: Option>, + pub policies: Option>, }, /// A list of directories where plugins may be searched for if no `__path__` was specified for them. diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index d0ac151c01..605efd16a0 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -77,6 +77,7 @@ flume = { workspace = true } form_urlencoded = { workspace = true } futures = { workspace = true } git-version = { workspace = true } +itertools = { workspace = true } lazy_static = { workspace = true } tracing = { workspace = true } ordered-float = { workspace = true } diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 1e95104967..9e749e1258 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -18,9 +18,12 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use std::{any::Any, sync::Arc}; +use std::{any::Any, collections::HashSet, iter, sync::Arc}; -use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject}; +use itertools::Itertools; +use zenoh_config::{ + AclConfig, AclMessage, CertCommonName, InterceptorFlow, Interface, Permission, Username, +}; use zenoh_protocol::{ core::ZenohIdProto, network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, @@ -36,11 +39,14 @@ use super::{ authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, InterceptorFactoryTrait, InterceptorTrait, }; -use crate::{api::key_expr::KeyExpr, net::routing::RoutingContext}; +use crate::{ + api::key_expr::KeyExpr, + net::routing::{interceptor::authorization::SubjectQuery, RoutingContext}, +}; pub struct AclEnforcer { enforcer: Arc, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct AuthSubject { id: usize, name: String, @@ -86,73 +92,112 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - let mut authn_ids = vec![]; - if let Ok(ids) = transport.get_auth_ids() { - for auth_id in ids { - match auth_id { - AuthId::CertCommonName(name) => { - let subject = &Subject::CertCommonName(name.clone()); - if let Some(val) = self.enforcer.subject_map.get(subject) { - authn_ids.push(AuthSubject { id: *val, name }); - } - } - AuthId::Username(name) => { - let subject = &Subject::Username(name.clone()); - if let Some(val) = self.enforcer.subject_map.get(subject) { - authn_ids.push(AuthSubject { id: *val, name }); - } - } - AuthId::None => {} - } + let auth_ids = match transport.get_auth_ids() { + Ok(auth_ids) => auth_ids, + Err(err) => { + tracing::error!("Couldn't get Transport Auth IDs: {}", err); + return (None, None); } - } - match transport.get_zid() { - Ok(zid) => { - match transport.get_links() { - Ok(links) => { - for 
link in links { - for face in link.interfaces { - let subject = &Subject::Interface(face.clone()); - if let Some(val) = self.enforcer.subject_map.get(subject) { - authn_ids.push(AuthSubject { - id: *val, - name: face, - }); - } - } - } - } - Err(e) => { - tracing::error!("Couldn't get interface list with error: {}", e); + }; + + let mut cert_common_names = Vec::new(); + let mut username = None; + + for auth_id in auth_ids { + match auth_id { + AuthId::CertCommonName(value) => { + cert_common_names.push(Some(CertCommonName(value))); + } + AuthId::Username(value) => { + if username.is_some() { + tracing::error!("Transport should not report more than one username"); return (None, None); } + username = Some(Username(value)); } - let ingress_interceptor = Box::new(IngressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - zid, - subject: authn_ids.clone(), - }); - let egress_interceptor = Box::new(EgressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - zid, - subject: authn_ids, + AuthId::None => {} + } + } + if cert_common_names.is_empty() { + cert_common_names.push(None); + } + + let links = match transport.get_links() { + Ok(links) => links, + Err(err) => { + tracing::error!("Couldn't get Transport links: {}", err); + return (None, None); + } + }; + let mut interfaces = links + .into_iter() + .flat_map(|link| { + link.interfaces + .into_iter() + .map(|interface| Some(Interface(interface))) + }) + .collect::>(); + if interfaces.is_empty() { + interfaces.push(None); + } else if interfaces.len() > 1 { + tracing::warn!("Transport returned multiple network interfaces, current ACL logic might incorrectly apply filters in this case!"); + } + + let mut auth_subjects = HashSet::new(); + + for ((username, interface), cert_common_name) in iter::once(username) + .cartesian_product(interfaces.into_iter()) + .cartesian_product(cert_common_names.into_iter()) + { + let query = SubjectQuery { + interface, + cert_common_name, + username, + }; + + if let Some(entry) = 
self.enforcer.subject_store.query(&query) { + auth_subjects.insert(AuthSubject { + id: entry.id, + name: format!("{query}"), }); - ( - self.enforcer - .interface_enabled - .ingress - .then_some(ingress_interceptor), - self.enforcer - .interface_enabled - .egress - .then_some(egress_interceptor), - ) } - Err(e) => { - tracing::error!("Failed to get zid with error :{}", e); - (None, None) + } + + let zid = match transport.get_zid() { + Ok(zid) => zid, + Err(err) => { + tracing::error!("Couldn't get Transport zid: {}", err); + return (None, None); } + }; + // FIXME: Investigate if `AuthSubject` can have duplicates above and try to avoid this conversion + let auth_subjects = auth_subjects.into_iter().collect::>(); + if auth_subjects.is_empty() { + tracing::info!( + "{zid} did not match any configured ACL subject. Default permission `{:?}` will be applied on all messages", + self.enforcer.default_permission + ); } + let ingress_interceptor = Box::new(IngressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + zid, + subject: auth_subjects.clone(), + }); + let egress_interceptor = Box::new(EgressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + zid, + subject: auth_subjects, + }); + ( + self.enforcer + .interface_enabled + .ingress + .then_some(ingress_interceptor), + self.enforcer + .interface_enabled + .egress + .then_some(egress_interceptor), + ) } fn new_transport_multicast( @@ -194,7 +239,7 @@ impl InterceptorTrait for IngressAclEnforcer { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (ingress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Put, "Put (ingress)", key_expr?) == Permission::Deny { return None; } } @@ -202,7 +247,8 @@ impl InterceptorTrait for IngressAclEnforcer { payload: RequestBody::Query(_), .. }) => { - if self.action(Action::Get, "Get (ingress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Query, "Query (ingress)", key_expr?) 
== Permission::Deny + { return None; } } @@ -211,7 +257,7 @@ impl InterceptorTrait for IngressAclEnforcer { .. }) => { if self.action( - Action::DeclareSubscriber, + AclMessage::DeclareSubscriber, "Declare Subscriber (ingress)", key_expr?, ) == Permission::Deny @@ -224,7 +270,7 @@ impl InterceptorTrait for IngressAclEnforcer { .. }) => { if self.action( - Action::DeclareQueryable, + AclMessage::DeclareQueryable, "Declare Queryable (ingress)", key_expr?, ) == Permission::Deny @@ -263,7 +309,7 @@ impl InterceptorTrait for EgressAclEnforcer { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (egress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Put, "Put (egress)", key_expr?) == Permission::Deny { return None; } } @@ -271,7 +317,7 @@ impl InterceptorTrait for EgressAclEnforcer { payload: RequestBody::Query(_), .. }) => { - if self.action(Action::Get, "Get (egress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Query, "Query (egress)", key_expr?) == Permission::Deny { return None; } } @@ -280,7 +326,7 @@ impl InterceptorTrait for EgressAclEnforcer { .. }) => { if self.action( - Action::DeclareSubscriber, + AclMessage::DeclareSubscriber, "Declare Subscriber (egress)", key_expr?, ) == Permission::Deny @@ -293,7 +339,7 @@ impl InterceptorTrait for EgressAclEnforcer { .. 
}) => { if self.action( - Action::DeclareQueryable, + AclMessage::DeclareQueryable, "Declare Queryable (egress)", key_expr?, ) == Permission::Deny @@ -311,7 +357,7 @@ pub trait AclActionMethods { fn zid(&self) -> ZenohIdProto; fn flow(&self) -> InterceptorFlow; fn authn_ids(&self) -> Vec; - fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { + fn action(&self, action: AclMessage, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); let authn_ids: Vec = self.authn_ids(); let zid = self.zid(); diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 283a02248b..8b8789fc3b 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -17,22 +17,146 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use std::{collections::HashMap, net::Ipv4Addr}; +use std::collections::HashMap; use ahash::RandomState; +use itertools::Itertools; use zenoh_config::{ - AclConfig, AclConfigRules, Action, InterceptorFlow, Permission, PolicyRule, Subject, + AclConfig, AclConfigPolicyEntry, AclConfigRule, AclConfigSubjects, AclMessage, CertCommonName, + InterceptorFlow, Interface, Permission, PolicyRule, Username, }; use zenoh_keyexpr::{ keyexpr, keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}, }; use zenoh_result::ZResult; -use zenoh_util::net::get_interface_names_by_addr; type PolicyForSubject = FlowPolicy; type PolicyMap = HashMap; -type SubjectMap = HashMap; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) struct Subject { + pub(crate) interface: SubjectProperty, + pub(crate) cert_common_name: SubjectProperty, + pub(crate) username: SubjectProperty, +} + +impl Subject { + fn matches(&self, query: &SubjectQuery) -> bool { + self.interface.matches(query.interface.as_ref()) + && 
self.username.matches(query.username.as_ref()) + && self + .cert_common_name + .matches(query.cert_common_name.as_ref()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) enum SubjectProperty { + Wildcard, + Exactly(T), +} + +impl SubjectProperty { + fn matches(&self, other: Option<&T>) -> bool { + match (self, other) { + (SubjectProperty::Wildcard, None) => true, + // NOTE: This match arm is the reason why `SubjectProperty` cannot simply be `Option` + (SubjectProperty::Wildcard, Some(_)) => true, + (SubjectProperty::Exactly(_), None) => false, + (SubjectProperty::Exactly(lhs), Some(rhs)) => lhs == rhs, + } + } +} + +#[derive(Debug)] +pub(crate) struct SubjectQuery { + pub(crate) interface: Option, + pub(crate) cert_common_name: Option, + pub(crate) username: Option, +} + +impl std::fmt::Display for SubjectQuery { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let subject_names = [ + self.interface.as_ref().map(|face| format!("{face}")), + self.cert_common_name.as_ref().map(|ccn| format!("{ccn}")), + self.username.as_ref().map(|username| format!("{username}")), + ]; + write!( + f, + "{}", + subject_names + .iter() + .filter_map(|v| v.as_ref()) + .cloned() + .collect::>() + .join("+") + ) + } +} + +#[derive(Debug, Clone)] +pub(crate) struct SubjectEntry { + pub(crate) subject: Subject, + pub(crate) id: usize, +} + +#[derive(Debug, Clone)] +pub(crate) struct SubjectStore { + inner: Vec, +} + +impl SubjectStore { + pub(crate) fn query(&self, query: &SubjectQuery) -> Option<&SubjectEntry> { + // FIXME: Can this search be better than linear? 
+ self.inner.iter().find(|entry| entry.subject.matches(query)) + } +} + +impl Default for SubjectStore { + fn default() -> Self { + SubjectMapBuilder::new().build() + } +} + +pub(crate) struct SubjectMapBuilder { + builder: HashMap, + id_counter: usize, +} + +impl SubjectMapBuilder { + pub(crate) fn new() -> Self { + Self { + // FIXME: Capacity can be calculated from the length of subject properties in configuration + builder: HashMap::new(), + id_counter: 0, + } + } + + pub(crate) fn build(self) -> SubjectStore { + SubjectStore { + inner: self + .builder + .into_iter() + .map(|(subject, id)| SubjectEntry { subject, id }) + .collect(), + } + } + + /// Assumes subject contains at most one instance of each Subject variant + pub(crate) fn insert_or_get(&mut self, subject: Subject) -> usize { + match self.builder.get(&subject).copied() { + Some(id) => id, + None => { + self.id_counter += 1; + self.builder.insert(subject, self.id_counter); + self.id_counter + } + } + } +} + type KeTreeRule = KeBoxTree; #[derive(Default)] @@ -58,27 +182,27 @@ impl PermissionPolicy { } #[derive(Default)] struct ActionPolicy { - get: PermissionPolicy, + query: PermissionPolicy, put: PermissionPolicy, declare_subscriber: PermissionPolicy, declare_queryable: PermissionPolicy, } impl ActionPolicy { - fn action(&self, action: Action) -> &PermissionPolicy { + fn action(&self, action: AclMessage) -> &PermissionPolicy { match action { - Action::Get => &self.get, - Action::Put => &self.put, - Action::DeclareSubscriber => &self.declare_subscriber, - Action::DeclareQueryable => &self.declare_queryable, + AclMessage::Query => &self.query, + AclMessage::Put => &self.put, + AclMessage::DeclareSubscriber => &self.declare_subscriber, + AclMessage::DeclareQueryable => &self.declare_queryable, } } - fn action_mut(&mut self, action: Action) -> &mut PermissionPolicy { + fn action_mut(&mut self, action: AclMessage) -> &mut PermissionPolicy { match action { - Action::Get => &mut self.get, - Action::Put => &mut 
self.put, - Action::DeclareSubscriber => &mut self.declare_subscriber, - Action::DeclareQueryable => &mut self.declare_queryable, + AclMessage::Query => &mut self.query, + AclMessage::Put => &mut self.put, + AclMessage::DeclareSubscriber => &mut self.declare_subscriber, + AclMessage::DeclareQueryable => &mut self.declare_queryable, } } } @@ -113,14 +237,14 @@ pub struct InterfaceEnabled { pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, - pub(crate) subject_map: SubjectMap, + pub(crate) subject_store: SubjectStore, pub(crate) policy_map: PolicyMap, pub(crate) interface_enabled: InterfaceEnabled, } #[derive(Debug, Clone)] pub struct PolicyInformation { - subject_map: SubjectMap, + subject_map: SubjectStore, policy_rules: Vec, } @@ -129,7 +253,7 @@ impl PolicyEnforcer { PolicyEnforcer { acl_enabled: true, default_permission: Permission::Deny, - subject_map: SubjectMap::default(), + subject_store: SubjectStore::default(), policy_map: PolicyMap::default(), interface_enabled: InterfaceEnabled::default(), } @@ -143,11 +267,23 @@ impl PolicyEnforcer { self.acl_enabled = mut_acl_config.enabled; self.default_permission = mut_acl_config.default_permission; if self.acl_enabled { - if let Some(mut rules) = mut_acl_config.rules { - if rules.is_empty() { - tracing::warn!("Access control rules are empty in config file"); + if let (Some(mut rules), Some(mut subjects), Some(policies)) = ( + mut_acl_config.rules, + mut_acl_config.subjects, + mut_acl_config.policies, + ) { + if rules.is_empty() || subjects.is_empty() || policies.is_empty() { + rules.is_empty().then(|| { + tracing::warn!("Access control rules list is empty in config file") + }); + subjects.is_empty().then(|| { + tracing::warn!("Access control subjects list is empty in config file") + }); + policies.is_empty().then(|| { + tracing::warn!("Access control policies list is empty in config file") + }); self.policy_map = PolicyMap::default(); - self.subject_map = 
SubjectMap::default(); + self.subject_store = SubjectStore::default(); if self.default_permission == Permission::Deny { self.interface_enabled = InterfaceEnabled { ingress: true, @@ -156,59 +292,71 @@ impl PolicyEnforcer { } } else { // check for undefined values in rules and initialize them to defaults - for (rule_offset, rule) in rules.iter_mut().enumerate() { - if rule.interfaces.is_none() { - tracing::warn!("ACL config interfaces list is empty. Applying rule #{} to all network interfaces", rule_offset); - rule.interfaces = - Some(get_interface_names_by_addr(Ipv4Addr::UNSPECIFIED.into())?); + for rule in rules.iter_mut() { + if rule.id.trim().is_empty() { + bail!("Found empty rule id in rules list"); } if rule.flows.is_none() { - tracing::warn!("ACL config flows list is empty. Applying rule #{} to both Ingress and Egress flows", rule_offset); + tracing::warn!("Rule '{}' flows list is not set. Setting it to both Ingress and Egress", rule.id); rule.flows = Some([InterceptorFlow::Ingress, InterceptorFlow::Egress].into()); } - if rule.usernames.is_none() { - rule.usernames = Some(Vec::new()); + } + // check for undefined values in subjects and initialize them to defaults + for subject in subjects.iter_mut() { + if subject.id.trim().is_empty() { + bail!("Found empty subject id in subjects list"); + } + + if subject + .cert_common_names + .as_ref() + .is_some_and(Vec::is_empty) + { + bail!("Subject property `cert_common_names` cannot be empty"); + } + + if subject.usernames.as_ref().is_some_and(Vec::is_empty) { + bail!("Subject property `usernames` cannot be empty"); } - if rule.cert_common_names.is_none() { - rule.cert_common_names = Some(Vec::new()); + + if subject.interfaces.as_ref().is_some_and(Vec::is_empty) { + bail!("Subject property `interfaces` cannot be empty"); } } - let policy_information = self.policy_information_point(&rules)?; - let subject_map = policy_information.subject_map; - let mut main_policy: PolicyMap = PolicyMap::default(); + let 
policy_information = + self.policy_information_point(subjects, rules, policies)?; + let mut main_policy: PolicyMap = PolicyMap::default(); for rule in policy_information.policy_rules { - if let Some(index) = subject_map.get(&rule.subject) { - let single_policy = main_policy.entry(*index).or_default(); - single_policy - .flow_mut(rule.flow) - .action_mut(rule.action) - .permission_mut(rule.permission) - .insert(keyexpr::new(&rule.key_expr)?, true); - - if self.default_permission == Permission::Deny { - self.interface_enabled = InterfaceEnabled { - ingress: true, - egress: true, - }; - } else { - match rule.flow { - InterceptorFlow::Ingress => { - self.interface_enabled.ingress = true; - } - InterceptorFlow::Egress => { - self.interface_enabled.egress = true; - } + let subject_policy = main_policy.entry(rule.subject_id).or_default(); + subject_policy + .flow_mut(rule.flow) + .action_mut(rule.message) + .permission_mut(rule.permission) + .insert(keyexpr::new(&rule.key_expr)?, true); + + if self.default_permission == Permission::Deny { + self.interface_enabled = InterfaceEnabled { + ingress: true, + egress: true, + }; + } else { + match rule.flow { + InterceptorFlow::Ingress => { + self.interface_enabled.ingress = true; + } + InterceptorFlow::Egress => { + self.interface_enabled.egress = true; } } - }; + } } self.policy_map = main_policy; - self.subject_map = subject_map; + self.subject_store = policy_information.subject_map; } } else { - tracing::warn!("Access control rules are empty in config file"); + bail!("All ACL rules/subjects/policies config lists must be provided"); } } Ok(()) @@ -219,14 +367,27 @@ impl PolicyEnforcer { */ pub fn policy_information_point( &self, - config_rule_set: &Vec, + subjects: Vec, + rules: Vec, + policies: Vec, ) -> ZResult { let mut policy_rules: Vec = Vec::new(); - for config_rule in config_rule_set { + let mut rule_map = HashMap::new(); + let mut subject_id_map = HashMap::>::new(); + let mut subject_map_builder = 
SubjectMapBuilder::new(); + + // validate rules config and insert them in hashmaps + for config_rule in rules { + if rule_map.contains_key(&config_rule.id) { + bail!( + "Rule id must be unique: id '{}' is repeated", + config_rule.id + ); + } // Config validation let mut validation_err = String::new(); - if config_rule.actions.is_empty() { - validation_err.push_str("ACL config actions list is empty. "); + if config_rule.messages.is_empty() { + validation_err.push_str("ACL config messages list is empty. "); } if config_rule.flows.as_ref().unwrap().is_empty() { validation_err.push_str("ACL config flows list is empty. "); @@ -235,105 +396,163 @@ impl PolicyEnforcer { validation_err.push_str("ACL config key_exprs list is empty. "); } if !validation_err.is_empty() { - bail!("{}", validation_err); + bail!("Rule '{}' is malformed: {}", config_rule.id, validation_err); } + for key_expr in config_rule.key_exprs.iter() { + if key_expr.trim().is_empty() { + bail!("Found empty key expression in rule '{}'", config_rule.id); + } + } + rule_map.insert(config_rule.id.clone(), config_rule); + } - // At least one must not be empty - let mut subject_validation_err: usize = 0; - validation_err = String::new(); - - if config_rule.interfaces.as_ref().unwrap().is_empty() { - subject_validation_err += 1; - validation_err.push_str("ACL config interfaces list is empty. "); + for config_subject in subjects.into_iter() { + if subject_id_map.contains_key(&config_subject.id) { + bail!( + "Subject id must be unique: id '{}' is repeated", + config_subject.id + ); } - if config_rule.cert_common_names.as_ref().unwrap().is_empty() { - subject_validation_err += 1; - validation_err.push_str("ACL config certificate common names list is empty. 
"); + // validate subject config fields + if config_subject + .interfaces + .as_ref() + .is_some_and(|interfaces| interfaces.iter().any(|face| face.0.trim().is_empty())) + { + bail!( + "Found empty interface value in subject '{}'", + config_subject.id + ); } - if config_rule.usernames.as_ref().unwrap().is_empty() { - subject_validation_err += 1; - validation_err.push_str("ACL config usernames list is empty. "); + if config_subject + .cert_common_names + .as_ref() + .is_some_and(|cert_common_names| { + cert_common_names.iter().any(|ccn| ccn.0.trim().is_empty()) + }) + { + bail!( + "Found empty cert_common_name value in subject '{}'", + config_subject.id + ); } - - if subject_validation_err == 3 { - bail!("{}", validation_err); + if config_subject.usernames.as_ref().is_some_and(|usernames| { + usernames + .iter() + .any(|username| username.0.trim().is_empty()) + }) { + bail!( + "Found empty username value in subject '{}'", + config_subject.id + ); } + // Map properties to SubjectProperty type + // FIXME: Unnecessary .collect() because of different iterator types + let interfaces = config_subject + .interfaces + .map(|interfaces| { + interfaces + .into_iter() + .map(SubjectProperty::Exactly) + .collect::>() + }) + .unwrap_or(vec![SubjectProperty::Wildcard]); + // FIXME: Unnecessary .collect() because of different iterator types + let cert_common_names = config_subject + .cert_common_names + .map(|cert_common_names| { + cert_common_names + .into_iter() + .map(SubjectProperty::Exactly) + .collect::>() + }) + .unwrap_or(vec![SubjectProperty::Wildcard]); + // FIXME: Unnecessary .collect() because of different iterator types + let usernames = config_subject + .usernames + .map(|usernames| { + usernames + .into_iter() + .map(SubjectProperty::Exactly) + .collect::>() + }) + .unwrap_or(vec![SubjectProperty::Wildcard]); - for subject in config_rule.interfaces.as_ref().unwrap() { - if subject.trim().is_empty() { - bail!("found an empty interface value in interfaces list"); - } 
- for flow in config_rule.flows.as_ref().unwrap() { - for action in &config_rule.actions { - for key_expr in &config_rule.key_exprs { - if key_expr.trim().is_empty() { - bail!("found an empty key-expression value in key_exprs list"); - } - policy_rules.push(PolicyRule { - subject: Subject::Interface(subject.clone()), - key_expr: key_expr.clone(), - action: *action, - permission: config_rule.permission, - flow: *flow, - }) - } - } - } + // create ACL subject combinations + let subject_combination_ids = interfaces + .into_iter() + .cartesian_product(cert_common_names) + .cartesian_product(usernames) + .map(|((interface, cert_common_name), username)| { + let subject = Subject { + interface, + cert_common_name, + username, + }; + subject_map_builder.insert_or_get(subject) + }) + .collect(); + subject_id_map.insert(config_subject.id.clone(), subject_combination_ids); + } + // finally, handle policy content + for (entry_id, entry) in policies.iter().enumerate() { + // validate policy config lists + if entry.rules.is_empty() || entry.subjects.is_empty() { + bail!( + "Policy #{} is malformed: empty subjects or rules list", + entry_id + ); } - for subject in config_rule.cert_common_names.as_ref().unwrap() { - if subject.trim().is_empty() { - bail!("found an empty value in certificate common names list"); + for subject_config_id in &entry.subjects { + if subject_config_id.trim().is_empty() { + bail!("Found empty subject id in policy #{}", entry_id) } - for flow in config_rule.flows.as_ref().unwrap() { - for action in &config_rule.actions { - for key_expr in &config_rule.key_exprs { - if key_expr.trim().is_empty() { - bail!("found an empty key-expression value in key_exprs list"); - } - policy_rules.push(PolicyRule { - subject: Subject::CertCommonName(subject.clone()), - key_expr: key_expr.clone(), - action: *action, - permission: config_rule.permission, - flow: *flow, - }) - } - } + if !subject_id_map.contains_key(subject_config_id) { + bail!( + "Subject '{}' in policy #{} 
does not exist in subjects list", + subject_config_id, + entry_id + ) } } - for subject in config_rule.usernames.as_ref().unwrap() { - if subject.trim().is_empty() { - bail!("found an empty value in usernames list"); + // Create PolicyRules + for rule_id in &entry.rules { + if rule_id.trim().is_empty() { + bail!("Found empty rule id in policy #{}", entry_id) } - for flow in config_rule.flows.as_ref().unwrap() { - for action in &config_rule.actions { - for key_expr in &config_rule.key_exprs { - if key_expr.trim().is_empty() { - bail!("found an empty key-expression value in key_exprs list"); + let rule = rule_map.get(rule_id).ok_or(zerror!( + "Rule '{}' in policy #{} does not exist in rules list", + rule_id, + entry_id + ))?; + for subject_config_id in &entry.subjects { + let subject_combination_ids = subject_id_map + .get(subject_config_id) + .expect("config subject id should exist in subject_id_map"); + for subject_id in subject_combination_ids { + for flow in rule + .flows + .as_ref() + .expect("flows list should be defined in rule") + { + for message in &rule.messages { + for key_expr in &rule.key_exprs { + policy_rules.push(PolicyRule { + subject_id: *subject_id, + key_expr: key_expr.clone(), + message: *message, + permission: rule.permission, + flow: *flow, + }); + } } - policy_rules.push(PolicyRule { - subject: Subject::Username(subject.clone()), - key_expr: key_expr.clone(), - action: *action, - permission: config_rule.permission, - flow: *flow, - }) } } } } } - let mut subject_map = SubjectMap::default(); - let mut counter = 1; - // Starting at 1 since 0 is the init value and should not match anything - for rule in policy_rules.iter() { - if !subject_map.contains_key(&rule.subject) { - subject_map.insert(rule.subject.clone(), counter); - counter += 1; - } - } Ok(PolicyInformation { - subject_map, + subject_map: subject_map_builder.build(), policy_rules, }) } @@ -345,7 +564,7 @@ impl PolicyEnforcer { &self, subject: usize, flow: InterceptorFlow, - action: 
Action, + message: AclMessage, key_expr: &str, ) -> ZResult { let policy_map = &self.policy_map; @@ -356,7 +575,7 @@ impl PolicyEnforcer { Some(single_policy) => { let deny_result = single_policy .flow(flow) - .action(action) + .action(message) .deny .nodes_including(keyexpr::new(&key_expr)?) .count(); @@ -368,7 +587,7 @@ impl PolicyEnforcer { } else { let allow_result = single_policy .flow(flow) - .action(action) + .action(message) .allow .nodes_including(keyexpr::new(&key_expr)?) .count(); diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index d1790dc009..0a08090569 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -33,24 +33,29 @@ mod test { const VALUE: &str = "zenoh"; #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn test_acl() { + async fn test_acl_pub_sub() { zenoh::try_init_log_from_env(); - test_pub_sub_deny().await; - test_pub_sub_allow().await; - test_pub_sub_deny_then_allow().await; - test_pub_sub_allow_then_deny().await; - test_get_qbl_deny().await; - test_get_qbl_allow().await; - test_get_qbl_allow_then_deny().await; - test_get_qbl_deny_then_allow().await; + test_pub_sub_deny(27447).await; + test_pub_sub_allow(27447).await; + test_pub_sub_deny_then_allow(27447).await; + test_pub_sub_allow_then_deny(27447).await; } - async fn get_basic_router_config() -> Config { + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_acl_get_queryable() { + test_get_qbl_deny(27448).await; + test_get_qbl_allow(27448).await; + test_get_qbl_allow_then_deny(27448).await; + test_get_qbl_deny_then_allow(27448).await; + } + + async fn get_basic_router_config(port: u16) -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); config .listen .endpoints - .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .set(vec![format!("tcp/127.0.0.1:{port}").parse().unwrap()]) .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config @@ -61,11 +66,11 @@ mod test { 
ztimeout!(s.close()).unwrap(); } - async fn get_client_sessions() -> (Session, Session) { + async fn get_client_sessions(port: u16) -> (Session, Session) { println!("Opening client sessions"); - let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); + let config = config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let config = config::client(["tcp/127.0.0.1:27447".parse::().unwrap()]); + let config = config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } @@ -76,27 +81,27 @@ mod test { ztimeout!(s02.close()).unwrap(); } - async fn test_pub_sub_deny() { + async fn test_pub_sub_deny(port: u16) { println!("test_pub_sub_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - "enabled": true, - "default_permission": "deny", - "rules": - [ - ] - }"#, + "enabled": true, + "default_permission": "deny", + "rules": [], + "subjects": [], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let (sub_session, pub_session) = get_client_sessions(port).await; { let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -120,27 +125,25 @@ mod test { close_router_session(session).await; } - async fn test_pub_sub_allow() { + async fn test_pub_sub_allow(port: u16) { println!("test_pub_sub_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - - "enabled": false, - "default_permission": "allow", - "rules": - [ - ] - - }"#, + "enabled": true, + 
"default_permission": "allow", + "rules": [], + "subjects": [], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let (sub_session, pub_session) = get_client_sessions(port).await; { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -167,41 +170,51 @@ mod test { close_router_session(session).await; } - async fn test_pub_sub_allow_then_deny() { + async fn test_pub_sub_allow_then_deny(port: u16) { println!("test_pub_sub_allow_then_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "flows": ["egress"], + "messages": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let (sub_session, pub_session) = get_client_sessions(port).await; { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -227,41 +240,51 @@ mod test { close_router_session(session).await; } - async fn 
test_pub_sub_deny_then_allow() { + async fn test_pub_sub_deny_then_allow(port: u16) { println!("test_pub_sub_deny_then_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "r1", + "permission": "allow", + "flows": ["egress", "ingress"], + "messages": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let (sub_session, pub_session) = get_client_sessions(port).await; { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -287,27 +310,27 @@ mod test { close_router_session(session).await; } - async fn test_get_qbl_deny() { + async fn test_get_qbl_deny(port: u16) { println!("test_get_qbl_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - "enabled": true, - "default_permission": "deny", - "rules": - [ - ] - }"#, + "enabled": true, + "default_permission": "deny", + "rules": [], + "subjects": [], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); let session = 
ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); @@ -341,27 +364,27 @@ mod test { close_router_session(session).await; } - async fn test_get_qbl_allow() { + async fn test_get_qbl_allow(port: u16) { println!("test_get_qbl_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - "enabled": true, - "default_permission": "allow", - "rules": - [ - ] - }"#, + "enabled": true, + "default_permission": "allow", + "rules": [], + "subjects": [], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); @@ -395,34 +418,45 @@ mod test { close_router_session(session).await; } - async fn test_get_qbl_deny_then_allow() { + async fn test_get_qbl_deny_then_allow(port: u16) { println!("test_get_qbl_deny_then_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable"], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "r1", + "permission": "allow", + "flows": ["egress", "ingress"], + "messages": [ + "query", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + 
"interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); @@ -430,7 +464,7 @@ mod test { let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); @@ -464,41 +498,52 @@ mod test { close_router_session(session).await; } - async fn test_get_qbl_allow_then_deny() { + async fn test_get_qbl_allow_then_deny(port: u16) { println!("test_get_qbl_allow_then_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "get", - "declare_queryable" ], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "flows": ["egress"], + "messages": [ + "query", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs index 39daff0199..09dd3b74eb 100644 --- a/zenoh/tests/authentication.rs +++ b/zenoh/tests/authentication.rs @@ -15,7 +15,7 @@ mod test { use std::{ fs, path::PathBuf, - sync::{Arc, Mutex}, + 
sync::{atomic::AtomicBool, Arc, Mutex}, time::Duration, }; @@ -34,36 +34,72 @@ mod test { const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; static TESTFILES_PATH: Lazy = Lazy::new(std::env::temp_dir); + static TESTFILES_CREATED: Lazy = Lazy::new(|| AtomicBool::new(false)); #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn test_authentication() { + async fn test_authentication_usrpwd() { zenoh_util::try_init_log_from_env(); create_new_files(TESTFILES_PATH.to_path_buf()) .await .unwrap(); - println!("testfiles created successfully."); - - test_pub_sub_deny_then_allow_usrpswd().await; - test_pub_sub_allow_then_deny_usrpswd().await; - test_get_qbl_allow_then_deny_usrpswd().await; - test_get_qbl_deny_then_allow_usrpswd().await; + test_pub_sub_deny_then_allow_usrpswd(37447).await; + test_pub_sub_allow_then_deny_usrpswd(37447).await; + test_get_qbl_allow_then_deny_usrpswd(37447).await; + test_get_qbl_deny_then_allow_usrpswd(37447).await; + } - test_pub_sub_deny_then_allow_tls(3774).await; - test_pub_sub_allow_then_deny_tls(3775).await; - test_get_qbl_allow_then_deny_tls(3776).await; - test_get_qbl_deny_then_allow_tls(3777).await; + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_tls() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_pub_sub_deny_then_allow_tls(37448).await; + test_pub_sub_allow_then_deny_tls(37449).await; + test_get_qbl_allow_then_deny_tls(37450).await; + test_get_qbl_deny_then_allow_tls(37451).await; + } - test_pub_sub_deny_then_allow_quic(3774, false).await; - test_pub_sub_allow_then_deny_quic(3775).await; - test_get_qbl_deny_then_allow_quic(3776).await; - test_get_qbl_allow_then_deny_quic(3777).await; + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_quic() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + 
.unwrap(); + test_pub_sub_deny_then_allow_quic(37452, false).await; + test_pub_sub_allow_then_deny_quic(37453).await; + test_get_qbl_deny_then_allow_quic(37454).await; + test_get_qbl_allow_then_deny_quic(37455).await; + } + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_lowlatency() { // Test link AuthIds accessibility for lowlatency transport - test_pub_sub_deny_then_allow_quic(3778, true).await; + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_pub_sub_deny_then_allow_quic(37456, true).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_subject_combinations() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_deny_allow_combination(37457).await; + test_allow_deny_combination(37458).await; } #[allow(clippy::all)] async fn create_new_files(certs_dir: std::path::PathBuf) -> std::io::Result<()> { + let created = TESTFILES_CREATED.fetch_or(true, std::sync::atomic::Ordering::SeqCst); + if created { + // only create files once per tests + println!("Skipping testfile creation: files already created by another test instance"); + return Ok(()); + } use std::io::prelude::*; let ca_pem = b"-----BEGIN CERTIFICATE----- MIIDiTCCAnGgAwIBAgIUO1x6LAlICgKs5+pYUTo4CughfKEwDQYJKoZIhvcNAQEL @@ -229,6 +265,7 @@ client2name:client2passwd"; file.write_all(test_file.value)?; } + println!("testfiles created successfully."); Ok(()) } @@ -332,13 +369,13 @@ client2name:client2passwd"; config } - async fn get_basic_router_config_usrpswd() -> Config { + async fn get_basic_router_config_usrpswd(port: u16) -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); config .listen .endpoints - .set(vec!["tcp/127.0.0.1:37447".parse().unwrap()]) + .set(vec![format!("tcp/127.0.0.1:{port}").parse().unwrap()]) .unwrap(); 
config.scouting.multicast.set_enabled(Some(false)).unwrap(); config @@ -370,6 +407,71 @@ client2name:client2passwd"; ztimeout!(s.close()).unwrap(); } + async fn get_basic_router_config_quic_usrpswd(port: u16) -> Config { + let cert_path = TESTFILES_PATH.to_string_lossy(); + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config + .listen + .endpoints + .set(vec![ + format!("quic/127.0.0.1:{port}").parse().unwrap(), + format!("tcp/127.0.0.1:{port}").parse().unwrap(), + ]) + .unwrap(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic", "tcp" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + }, + }, + "auth": { + usrpwd: { + user: "routername", + password: "routerpasswd", + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .auth + .usrpwd + .set_dictionary_file(Some(format!( + "{}/credentials.txt", + TESTFILES_PATH.to_string_lossy() + ))) + .unwrap(); + config + .transport + .link + .tls + .set_server_private_key(Some(format!("{}/serversidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_server_certificate(Some(format!("{}/serverside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config + } + async fn get_client_sessions_tls(port: u16) -> (Session, Session) { let cert_path = TESTFILES_PATH.to_string_lossy(); println!("Opening client sessions"); @@ -549,9 +651,10 @@ client2name:client2passwd"; (s01, s02) } - async fn get_client_sessions_usrpswd() -> (Session, Session) { + async fn get_client_sessions_usrpswd(port: u16) -> (Session, Session) { println!("Opening client sessions"); - let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); + let mut config = + config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); config .insert_json5( 
"transport", @@ -566,7 +669,8 @@ client2name:client2passwd"; ) .unwrap(); let s01 = ztimeout!(zenoh::open(config)).unwrap(); - let mut config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); + let mut config = + config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); config .insert_json5( "transport", @@ -584,6 +688,101 @@ client2name:client2passwd"; (s01, s02) } + async fn get_client_sessions_quic_usrpswd(port: u16) -> (Session, Session) { + let cert_path = TESTFILES_PATH.to_string_lossy(); + println!("Opening client sessions"); + let mut config = config::client([format!("quic/127.0.0.1:{port}") + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + }, + "auth": { + usrpwd: { + user: "client1name", + password: "client1passwd", + }, + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + }, + "auth": { + usrpwd: { + user: "client2name", + password: "client2passwd", + }, + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + 
.transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + async fn close_sessions(s01: Session, s02: Session) { println!("Closing client sessions"); ztimeout!(s01.close()).unwrap(); @@ -603,19 +802,31 @@ client2name:client2passwd"; "default_permission": "deny", "rules": [ { + "id": "r1", "permission": "allow", "flows": ["ingress","egress"], - "actions": [ + "messages": [ "put", "declare_subscriber" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -659,19 +870,31 @@ client2name:client2passwd"; "default_permission": "allow", "rules": [ { + "id": "r1", "permission": "deny", "flows": ["egress"], - "actions": [ + "messages": [ "put", "declare_subscriber" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -717,19 +940,31 @@ client2name:client2passwd"; "default_permission": "deny", "rules": [ { + "id": "r1", "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", + "flows": ["egress", "ingress"], + "messages": [ + "query", "declare_queryable" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -790,19 +1025,31 @@ client2name:client2passwd"; "default_permission": "allow", "rules": [ { + "id": "r1", "permission": "deny", "flows": ["egress"], - "actions": [ - "get", + "messages": [ + "query", "declare_queryable" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": 
["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -863,19 +1110,31 @@ client2name:client2passwd"; "default_permission": "deny", "rules": [ { + "id": "r1", "permission": "allow", - "flows": ["ingress","egress"], - "actions": [ + "flows": ["egress", "ingress"], + "messages": [ "put", "declare_subscriber" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -921,19 +1180,31 @@ client2name:client2passwd"; "default_permission": "allow", "rules": [ { + "id": "r1", "permission": "deny", "flows": ["egress"], - "actions": [ + "messages": [ "put", "declare_subscriber" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -980,18 +1251,31 @@ client2name:client2passwd"; "default_permission": "deny", "rules": [ { + "id": "r1", "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable"], + "flows": ["egress", "ingress"], + "messages": [ + "query", + "declare_queryable" + ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -1051,22 +1335,33 @@ client2name:client2passwd"; r#"{ "enabled": true, "default_permission": "allow", - "rules": - [ + "rules": [ { + "id": "r1", "permission": "deny", "flows": ["egress"], - "actions": [ - "get", + "messages": [ + "query", "declare_queryable" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "cert_common_names": [ "client_side" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -1114,10 +1409,10 @@ client2name:client2passwd"; close_router_session(session).await; } - async fn 
test_pub_sub_deny_then_allow_usrpswd() { + async fn test_pub_sub_deny_then_allow_usrpswd(port: u16) { println!("test_pub_sub_deny_then_allow_usrpswd"); - let mut config_router = get_basic_router_config_usrpswd().await; + let mut config_router = get_basic_router_config_usrpswd(port).await; config_router .insert_json5( @@ -1127,20 +1422,32 @@ client2name:client2passwd"; "default_permission": "deny", "rules": [ { + "id": "r1", "permission": "allow", - "flows": ["ingress","egress"], - "actions": [ + "flows": ["ingress", "egress"], + "messages": [ "put", "declare_subscriber" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "usernames": [ "client1name", "client2name" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -1149,7 +1456,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_usrpswd().await; + let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await; { let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -1173,10 +1480,10 @@ client2name:client2passwd"; close_router_session(session).await; } - async fn test_pub_sub_allow_then_deny_usrpswd() { + async fn test_pub_sub_allow_then_deny_usrpswd(port: u16) { println!("test_pub_sub_allow_then_deny_usrpswd"); - let mut config_router = get_basic_router_config_usrpswd().await; + let mut config_router = get_basic_router_config_usrpswd(port).await; config_router .insert_json5( "access_control", @@ -1185,20 +1492,32 @@ client2name:client2passwd"; "default_permission": "allow", "rules": [ { + "id": "r1", "permission": "deny", "flows": ["egress"], - "actions": [ + "messages": [ "put", "declare_subscriber" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "usernames": [ "client1name", "client2name" ] - }, + } + ], + "policies": [ + { + "rules": 
["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -1206,7 +1525,7 @@ client2name:client2passwd"; println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_usrpswd().await; + let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await; { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -1232,10 +1551,10 @@ client2name:client2passwd"; close_router_session(session).await; } - async fn test_get_qbl_deny_then_allow_usrpswd() { + async fn test_get_qbl_deny_then_allow_usrpswd(port: u16) { println!("test_get_qbl_deny_then_allow_usrpswd"); - let mut config_router = get_basic_router_config_usrpswd().await; + let mut config_router = get_basic_router_config_usrpswd(port).await; config_router .insert_json5( "access_control", @@ -1244,20 +1563,32 @@ client2name:client2passwd"; "default_permission": "deny", "rules": [ { + "id": "r1", "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", + "flows": ["ingress", "egress"], + "messages": [ + "query", "declare_queryable" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "usernames": [ "client1name", "client2name" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -1267,7 +1598,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions_usrpswd().await; + let (get_session, qbl_session) = get_client_sessions_usrpswd(port).await; { let mut received_value = String::new(); @@ -1306,10 +1637,10 @@ client2name:client2passwd"; close_router_session(session).await; } - async fn test_get_qbl_allow_then_deny_usrpswd() { + async fn test_get_qbl_allow_then_deny_usrpswd(port: u16) { println!("test_get_qbl_allow_then_deny_usrpswd"); - let mut config_router = 
get_basic_router_config_usrpswd().await; + let mut config_router = get_basic_router_config_usrpswd(port).await; config_router .insert_json5( "access_control", @@ -1318,20 +1649,32 @@ client2name:client2passwd"; "default_permission": "allow", "rules": [ { + "id": "r1", "permission": "deny", "flows": ["egress"], - "actions": [ - "get", + "messages": [ + "query", "declare_queryable" ], "key_exprs": [ "test/demo" ], + }, + ], + "subjects": [ + { + "id": "s1", "usernames": [ "client1name", "client2name" ] - }, + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } ] }"#, ) @@ -1340,7 +1683,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions_usrpswd().await; + let (get_session, qbl_session) = get_client_sessions_usrpswd(port).await; { let mut received_value = String::new(); @@ -1378,4 +1721,196 @@ client2name:client2passwd"; close_sessions(get_session, qbl_session).await; close_router_session(session).await; } + + async fn test_deny_allow_combination(port: u16) { + println!("test_deny_allow_combination"); + + let mut config_router = get_basic_router_config_quic_usrpswd(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "r1", + "permission": "allow", + "flows": ["ingress", "egress"], + "messages": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "cert_common_names": [ + "client_side" + ], + "usernames": [ + "client1name", + "client2name" + ] + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await; + { + let publisher = 
ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + let (sub_session, pub_session) = get_client_sessions_quic_usrpswd(port).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_router_session(session).await; + } + + async fn test_allow_deny_combination(port: u16) { + println!("test_allow_deny_combination"); + + let mut config_router = get_basic_router_config_quic_usrpswd(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "flows": ["egress"], + "messages": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "cert_common_names": [ + "client_side" + ], + "usernames": [ + 
"client1name", + "client2name" + ] + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, + ) + .unwrap(); + + println!("Opening router session"); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + let (sub_session, pub_session) = get_client_sessions_quic_usrpswd(port).await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE)).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_router_session(session).await; + } } From f77585321fa665c95d1bdf88f9965fbfcb9695e7 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 24 Jul 2024 11:24:12 +0200 Subject: [PATCH 546/598] Fix timestamp codec len calculation (#1258) --- 
commons/zenoh-codec/src/core/timestamp.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-codec/src/core/timestamp.rs b/commons/zenoh-codec/src/core/timestamp.rs index 3ec059ae1b..95149144dd 100644 --- a/commons/zenoh-codec/src/core/timestamp.rs +++ b/commons/zenoh-codec/src/core/timestamp.rs @@ -23,7 +23,8 @@ use crate::{LCodec, RCodec, WCodec, Zenoh080}; impl LCodec<&Timestamp> for Zenoh080 { fn w_len(self, x: &Timestamp) -> usize { - self.w_len(x.get_time().as_u64()) + self.w_len(x.get_id().size()) + let id = x.get_id(); + self.w_len(x.get_time().as_u64()) + self.w_len(&id.to_le_bytes()[..id.size()]) } } From aace7f1e4755f3b10029f05b8738b5c3a499a4dd Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Wed, 24 Jul 2024 12:35:13 +0300 Subject: [PATCH 547/598] SHM bugfix (#1256) * Do not trigger transport error in case of SHM buffer invalidation * Fix spelling * Drop the whole ZBuf in case of SHM error! * fix clippy errors * Fix misaligned memory access bug (affects non-64 bit ARM) * fix tests to be platform-agnostic * Update posix_segment.rs * Update posix_segment.rs --- commons/zenoh-shm/src/posix_shm/segment.rs | 22 ++++------ commons/zenoh-shm/tests/posix_array.rs | 22 +++++----- commons/zenoh-shm/tests/posix_segment.rs | 44 +++++++++++-------- .../src/unicast/establishment/ext/shm.rs | 17 ++++--- 4 files changed, 56 insertions(+), 49 deletions(-) diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs index 5458ab3e3e..657976ece1 100644 --- a/commons/zenoh-shm/src/posix_shm/segment.rs +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // -use std::{ - fmt::{Debug, Display}, - mem::size_of, -}; +use std::fmt::{Debug, Display}; use rand::Rng; use shared_memory::{Shmem, ShmemConf, ShmemError}; @@ -63,7 +60,7 @@ where // If creation fails because segment already exists for this id, // the creation 
attempt will be repeated with another id match ShmemConf::new() - .size(alloc_size + size_of::()) + .size(alloc_size) .os_id(Self::os_id(id.clone(), id_prefix)) .create() { @@ -71,7 +68,6 @@ where tracing::debug!( "Created SHM segment, size: {alloc_size}, prefix: {id_prefix}, id: {id}" ); - unsafe { *(shmem.as_ptr() as *mut usize) = alloc_size }; return Ok(Segment { shmem, id }); } Err(ShmemError::LinkExists) => {} @@ -94,10 +90,6 @@ where ) })?; - if shmem.len() <= size_of::() { - bail!("SHM segment too small") - } - tracing::debug!("Opened SHM segment, prefix: {id_prefix}, id: {id}"); Ok(Self { shmem, id }) @@ -110,17 +102,21 @@ where } pub fn as_ptr(&self) -> *mut u8 { - unsafe { self.shmem.as_ptr().add(size_of::()) } + self.shmem.as_ptr() } + /// Returns the length of this [`Segment`]. + /// NOTE: one some platforms (at least windows) the returned len will be the actual length of an shm segment + /// (a required len rounded up to the nearest multiply of page size), on other (at least linux and macos) this + /// returns a value requested upon segment creation pub fn len(&self) -> usize { - unsafe { *(self.shmem.as_ptr() as *mut usize) } + self.shmem.len() } // TODO: dead code warning occurs because of `tested_crate_module!()` macro when feature `test` is not enabled. 
Better to fix that #[allow(dead_code)] pub fn is_empty(&self) -> bool { - unsafe { *(self.shmem.as_ptr() as *mut usize) == 0 } + self.len() == 0 } pub fn id(&self) -> ID { diff --git a/commons/zenoh-shm/tests/posix_array.rs b/commons/zenoh-shm/tests/posix_array.rs index 562102ea17..83fdad88fb 100644 --- a/commons/zenoh-shm/tests/posix_array.rs +++ b/commons/zenoh-shm/tests/posix_array.rs @@ -41,25 +41,25 @@ impl TestElem { } fn validate_array( - array1: &mut ArrayInSHM, - array2: &ArrayInSHM, + created_array: &mut ArrayInSHM, + opened_array: &ArrayInSHM, expected_elem_count: usize, ) where ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, isize: AsPrimitive, usize: AsPrimitive, { - assert!(array1.elem_count() == expected_elem_count); - assert!(array2.elem_count() == expected_elem_count); + assert!(created_array.elem_count() == expected_elem_count); + assert!(opened_array.elem_count() >= expected_elem_count); let mut fill_ctr = 0; let mut validate_ctr = 0; // first of all, fill and validate elements sequentially - for i in 0..array1.elem_count() { + for i in 0..expected_elem_count { unsafe { - let elem1 = &mut *array1.elem_mut(i.as_()); - let elem2 = &*array2.elem(i.as_()); + let elem1 = &mut *created_array.elem_mut(i.as_()); + let elem2 = &*opened_array.elem(i.as_()); elem1.fill(&mut fill_ctr); elem2.validate(&mut validate_ctr); @@ -67,17 +67,17 @@ fn validate_array( } // then fill all the elements... 
- for i in 0..array1.elem_count() { + for i in 0..expected_elem_count { unsafe { - let elem1 = &mut *array1.elem_mut(i.as_()); + let elem1 = &mut *created_array.elem_mut(i.as_()); elem1.fill(&mut fill_ctr); } } // ...and validate all the elements - for i in 0..array2.elem_count() { + for i in 0..expected_elem_count { unsafe { - let elem2 = &*array2.elem(i.as_()); + let elem2 = &*opened_array.elem(i.as_()); elem2.validate(&mut validate_ctr); } } diff --git a/commons/zenoh-shm/tests/posix_segment.rs b/commons/zenoh-shm/tests/posix_segment.rs index 094ae40a85..879fccf298 100644 --- a/commons/zenoh-shm/tests/posix_segment.rs +++ b/commons/zenoh-shm/tests/posix_segment.rs @@ -19,18 +19,22 @@ use zenoh_shm::posix_shm::segment::Segment; pub mod common; use common::{validate_memory, TEST_SEGMENT_PREFIX}; -fn validate_segment(segment1: &Segment, segment2: &Segment) -where +fn validate_segment( + created_segment: &Segment, + opened_segment: &Segment, + expected_elem_count: usize, +) where rand::distributions::Standard: rand::distributions::Distribution, ID: Clone + Display, { - assert!(segment1.len() == segment2.len()); + assert!(created_segment.len() == expected_elem_count); + assert!(opened_segment.len() >= expected_elem_count); - let ptr1 = segment1.as_ptr(); - let ptr2 = segment2.as_ptr(); + let ptr1 = created_segment.as_ptr(); + let ptr2 = opened_segment.as_ptr(); - let slice1 = unsafe { slice::from_raw_parts_mut(ptr1, segment1.len()) }; - let slice2 = unsafe { slice::from_raw_parts(ptr2, segment2.len()) }; + let slice1 = unsafe { slice::from_raw_parts_mut(ptr1, expected_elem_count) }; + let slice2 = unsafe { slice::from_raw_parts(ptr2, expected_elem_count) }; validate_memory(slice1, slice2); } @@ -40,22 +44,24 @@ where rand::distributions::Standard: rand::distributions::Distribution, ID: Copy + Clone + Display, { - let new_segment: Segment = - Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + let elem_count = 900; + + let 
created_segment: Segment = + Segment::create(elem_count, TEST_SEGMENT_PREFIX).expect("error creating new segment"); - let opened_segment_instance_1 = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + let opened_segment_instance_1 = Segment::open(created_segment.id(), TEST_SEGMENT_PREFIX) .expect("error opening existing segment!"); - validate_segment(&new_segment, &opened_segment_instance_1); + validate_segment(&created_segment, &opened_segment_instance_1, elem_count); - let opened_segment_instance_2 = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + let opened_segment_instance_2 = Segment::open(created_segment.id(), TEST_SEGMENT_PREFIX) .expect("error opening existing segment!"); - validate_segment(&new_segment, &opened_segment_instance_1); - validate_segment(&new_segment, &opened_segment_instance_2); + validate_segment(&created_segment, &opened_segment_instance_1, elem_count); + validate_segment(&created_segment, &opened_segment_instance_2, elem_count); drop(opened_segment_instance_1); - validate_segment(&new_segment, &opened_segment_instance_2); + validate_segment(&created_segment, &opened_segment_instance_2, elem_count); } /// UNSIGNED /// @@ -116,19 +122,19 @@ fn segment_i128_id() { #[test] fn segment_open() { - let new_segment: Segment = + let created_segment: Segment = Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); - let _opened_segment = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + let _opened_segment = Segment::open(created_segment.id(), TEST_SEGMENT_PREFIX) .expect("error opening existing segment!"); } #[test] fn segment_open_error() { let id = { - let new_segment: Segment = + let created_segment: Segment = Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); - new_segment.id() + created_segment.id() }; let _opened_segment = Segment::open(id, TEST_SEGMENT_PREFIX) diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs 
b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index e2068af94a..025aaaef44 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -36,6 +36,10 @@ const AUTH_SEGMENT_PREFIX: &str = "auth"; pub(crate) type AuthSegmentID = u32; pub(crate) type AuthChallenge = u64; +const LEN_INDEX: usize = 0; +const CHALLENGE_INDEX: usize = 1; +const ID_START_INDEX: usize = 2; + #[derive(Debug)] pub struct AuthSegment { array: ArrayInSHM, @@ -44,13 +48,14 @@ pub struct AuthSegment { impl AuthSegment { pub fn create(challenge: AuthChallenge, shm_protocols: &[ProtocolID]) -> ZResult { let array = ArrayInSHM::::create( - 1 + shm_protocols.len(), + ID_START_INDEX + shm_protocols.len(), AUTH_SEGMENT_PREFIX, )?; unsafe { - (*array.elem_mut(0)) = challenge; - for elem in 1..array.elem_count() { - (*array.elem_mut(elem)) = shm_protocols[elem - 1] as u64; + (*array.elem_mut(LEN_INDEX)) = shm_protocols.len() as AuthChallenge; + (*array.elem_mut(CHALLENGE_INDEX)) = challenge; + for elem in ID_START_INDEX..array.elem_count() { + (*array.elem_mut(elem)) = shm_protocols[elem - ID_START_INDEX] as u64; } }; Ok(Self { array }) @@ -62,12 +67,12 @@ impl AuthSegment { } pub fn challenge(&self) -> AuthChallenge { - unsafe { *self.array.elem(0) } + unsafe { *self.array.elem(CHALLENGE_INDEX) } } pub fn protocols(&self) -> Vec { let mut result = vec![]; - for elem in 1..self.array.elem_count() { + for elem in ID_START_INDEX..self.array.elem_count() { result.push(unsafe { *self.array.elem(elem) as u32 }); } result From 7aeb4ade49862f3b654c7df0dc5bbc812106a11f Mon Sep 17 00:00:00 2001 From: oteffahi <70609372+oteffahi@users.noreply.github.com> Date: Wed, 24 Jul 2024 15:44:43 +0200 Subject: [PATCH 548/598] Add publisher delete and queryable reply messages to ACL (#1259) * Expose reply key_expr to interceptors * Add reply message to ACL logic and config * Update DEFAULT_CONFIG * Update ACL get/queryable tests, add reply 
tests * Improve reply matching * Specify all existing message types in ACL interceptor matching * Add reply to authentication qbl tests configs * Add delete message to ACL logic and config * Add delete message to DEFAULT_CONFIG, format messages * Add delete message to ACL pub/sub tests * Fix clippy errors * Reorder message matching * Revert "Expose reply key_expr to interceptors", use wire_expr for ACL filtering of reply messages This reverts commit 3a78d5da5b98bfbc18cbbee6c359e61fed8f6827. * Revert key_expr parsing change to reply ingress Ingress reply messages are not affected by the unimplemented key_expr in routing/dispatcher/queries.rs --- DEFAULT_CONFIG.json5 | 6 +- commons/zenoh-config/src/lib.rs | 2 + .../net/routing/interceptor/access_control.rs | 113 ++++++- .../net/routing/interceptor/authorization.rs | 6 + zenoh/tests/acl.rs | 276 ++++++++++++++++-- zenoh/tests/authentication.rs | 18 +- 6 files changed, 372 insertions(+), 49 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 33c9b3acdd..2dfb5b2fff 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -199,7 +199,8 @@ // /// Id has to be unique within the rule set // "id": "rule1", // "messages": [ - // "put", "query", "declare_subscriber", "declare_queryable" + // "put", "delete", "declare_subscriber", + // "query", "reply", "declare_queryable", // ], // "flows":["egress","ingress"], // "permission": "allow", @@ -210,7 +211,8 @@ // { // "id": "rule2", // "messages": [ - // "put", "query", "declare_subscriber", "declare_queryable" + // "put", "delete", "declare_subscriber", + // "query", "reply", "declare_queryable", // ], // "flows":["ingress"], // "permission": "allow", diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 270cf950c3..f5fc01aa63 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -166,9 +166,11 @@ pub struct PolicyRule { #[serde(rename_all = "snake_case")] pub enum AclMessage { Put, + 
Delete, DeclareSubscriber, Query, DeclareQueryable, + Reply, } #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 9e749e1258..6af064a878 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -26,7 +26,7 @@ use zenoh_config::{ }; use zenoh_protocol::{ core::ZenohIdProto, - network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, + network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; @@ -235,6 +235,21 @@ impl InterceptorTrait for IngressAclEnforcer { .or_else(|| ctx.full_expr()); match &ctx.msg.body { + NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) => { + if self.action(AclMessage::Query, "Query (ingress)", key_expr?) == Permission::Deny + { + return None; + } + } + NetworkBody::Response(Response { .. }) => { + if self.action(AclMessage::Reply, "Reply (ingress)", key_expr?) == Permission::Deny + { + return None; + } + } NetworkBody::Push(Push { payload: PushBody::Put(_), .. @@ -243,11 +258,12 @@ impl InterceptorTrait for IngressAclEnforcer { return None; } } - NetworkBody::Request(Request { - payload: RequestBody::Query(_), + NetworkBody::Push(Push { + payload: PushBody::Del(_), .. }) => { - if self.action(AclMessage::Query, "Query (ingress)", key_expr?) == Permission::Deny + if self.action(AclMessage::Delete, "Delete (ingress)", key_expr?) + == Permission::Deny { return None; } @@ -278,7 +294,38 @@ impl InterceptorTrait for IngressAclEnforcer { return None; } } - _ => {} + // Unfiltered Declare messages + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareFinal(_), + .. 
+ }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareToken(_), + .. + }) => {} + // Unfiltered Undeclare messages + NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareToken(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareQueryable(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareSubscriber(_), + .. + }) => {} + // Unfiltered remaining message types + NetworkBody::Interest(_) | NetworkBody::OAM(_) | NetworkBody::ResponseFinal(_) => {} } Some(ctx) } @@ -305,6 +352,22 @@ impl InterceptorTrait for EgressAclEnforcer { .or_else(|| ctx.full_expr()); match &ctx.msg.body { + NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) => { + if self.action(AclMessage::Query, "Query (egress)", key_expr?) == Permission::Deny { + return None; + } + } + NetworkBody::Response(Response { wire_expr, .. }) => { + // @TODO: Remove wire_expr usage when issue #1255 is implemented + if self.action(AclMessage::Reply, "Reply (egress)", wire_expr.as_str()) + == Permission::Deny + { + return None; + } + } NetworkBody::Push(Push { payload: PushBody::Put(_), .. @@ -313,11 +376,12 @@ impl InterceptorTrait for EgressAclEnforcer { return None; } } - NetworkBody::Request(Request { - payload: RequestBody::Query(_), + NetworkBody::Push(Push { + payload: PushBody::Del(_), .. }) => { - if self.action(AclMessage::Query, "Query (egress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Delete, "Delete (egress)", key_expr?) == Permission::Deny + { return None; } } @@ -347,7 +411,38 @@ impl InterceptorTrait for EgressAclEnforcer { return None; } } - _ => {} + // Unfiltered Declare messages + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareFinal(_), + .. 
+ }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareToken(_), + .. + }) => {} + // Unfiltered Undeclare messages + NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareToken(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareQueryable(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareSubscriber(_), + .. + }) => {} + // Unfiltered remaining message types + NetworkBody::Interest(_) | NetworkBody::OAM(_) | NetworkBody::ResponseFinal(_) => {} } Some(ctx) } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 8b8789fc3b..a7446382d1 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -184,15 +184,19 @@ impl PermissionPolicy { struct ActionPolicy { query: PermissionPolicy, put: PermissionPolicy, + delete: PermissionPolicy, declare_subscriber: PermissionPolicy, declare_queryable: PermissionPolicy, + reply: PermissionPolicy, } impl ActionPolicy { fn action(&self, action: AclMessage) -> &PermissionPolicy { match action { AclMessage::Query => &self.query, + AclMessage::Reply => &self.reply, AclMessage::Put => &self.put, + AclMessage::Delete => &self.delete, AclMessage::DeclareSubscriber => &self.declare_subscriber, AclMessage::DeclareQueryable => &self.declare_queryable, } @@ -200,7 +204,9 @@ impl ActionPolicy { fn action_mut(&mut self, action: AclMessage) -> &mut PermissionPolicy { match action { AclMessage::Query => &mut self.query, + AclMessage::Reply => &mut self.reply, AclMessage::Put => &mut self.put, + AclMessage::Delete => &mut self.delete, AclMessage::DeclareSubscriber => &mut self.declare_subscriber, AclMessage::DeclareQueryable => &mut self.declare_queryable, } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 0a08090569..13104338b7 100644 --- 
a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -23,6 +23,7 @@ mod test { config, config::{EndPoint, WhatAmI}, prelude::*, + sample::SampleKind, Config, Session, }; use zenoh_core::{zlock, ztimeout}; @@ -43,12 +44,21 @@ mod test { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn test_acl_get_queryable() { + zenoh::try_init_log_from_env(); test_get_qbl_deny(27448).await; test_get_qbl_allow(27448).await; test_get_qbl_allow_then_deny(27448).await; test_get_qbl_deny_then_allow(27448).await; } + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_acl_queryable_reply() { + zenoh::try_init_log_from_env(); + // Only test cases not covered by `test_acl_get_queryable` + test_reply_deny(27449).await; + test_reply_allow_then_deny(27449).await; + } + async fn get_basic_router_config(port: u16) -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -105,12 +115,20 @@ mod test { { let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); + let deleted_clone = deleted.clone(); let subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); + if sample.kind() == SampleKind::Put { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } }) .await .unwrap(); @@ -119,6 +137,10 @@ mod test { publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(!(*zlock!(deleted))); 
ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; @@ -147,22 +169,32 @@ mod test { { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); - let subscriber = - ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { + let deleted_clone = deleted.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + if sample.kind() == SampleKind::Put { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.payload().deserialize::().unwrap(); - })) + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } + }) + .await .unwrap(); tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE)).unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; - assert_eq!(*zlock!(received_value), VALUE); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(*zlock!(deleted)); ztimeout!(subscriber.undeclare()).unwrap(); } @@ -184,9 +216,10 @@ mod test { { "id": "r1", "permission": "deny", - "flows": ["egress"], + "flows": ["egress", "ingress"], "messages": [ "put", + "delete", "declare_subscriber" ], "key_exprs": [ @@ -218,22 +251,32 @@ mod test { { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); - let subscriber = - ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { + let deleted_clone = deleted.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + if sample.kind() == SampleKind::Put { let mut temp_value = 
zlock!(temp_recv_value); *temp_value = sample.payload().deserialize::().unwrap(); - })) + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } + }) + .await .unwrap(); tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE)).unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; - assert_ne!(*zlock!(received_value), VALUE); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(!(*zlock!(deleted))); ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; @@ -257,6 +300,7 @@ mod test { "flows": ["egress", "ingress"], "messages": [ "put", + "delete", "declare_subscriber" ], "key_exprs": [ @@ -288,22 +332,32 @@ mod test { { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); - let subscriber = - ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { + let deleted_clone = deleted.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + if sample.kind() == SampleKind::Put { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.payload().deserialize::().unwrap(); - })) + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } + }) + .await .unwrap(); tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE)).unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; - assert_eq!(*zlock!(received_value), VALUE); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(*zlock!(deleted)); ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; @@ -320,9 +374,24 @@ mod test { r#"{ "enabled": true, 
"default_permission": "deny", - "rules": [], - "subjects": [], - "policies": [], + "rules": [ + { + "id": "allow reply", + "permission": "allow", + "messages": ["reply"], + "flows": ["egress", "ingress"], + "key_exprs": ["test/demo"], + } + ], + "subjects": [ + { "id": "all" } + ], + "policies": [ + { + "rules": ["allow reply"], + "subjects": ["all"], + } + ], }"#, ) .unwrap(); @@ -435,7 +504,8 @@ mod test { "flows": ["egress", "ingress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply" ], "key_exprs": [ "test/demo" @@ -512,7 +582,7 @@ mod test { { "id": "r1", "permission": "deny", - "flows": ["egress"], + "flows": ["egress", "ingress"], "messages": [ "query", "declare_queryable" @@ -576,4 +646,146 @@ mod test { close_sessions(get_session, qbl_session).await; close_router_session(session).await; } + + async fn test_reply_deny(port: u16) { + println!("test_reply_deny"); + + let mut config_router = get_basic_router_config(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "allow get/declare qbl", + "permission": "allow", + "messages": ["query", "declare_queryable"], + "key_exprs": ["test/demo"], + } + ], + "subjects": [ + { "id": "all" } + ], + "policies": [ + { + "rules": ["allow get/declare qbl"], + "subjects": ["all"], + } + ], + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let 
Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!("Error : {:?}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn test_reply_allow_then_deny(port: u16) { + println!("test_reply_allow_then_deny"); + + let mut config_router = get_basic_router_config(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "messages": ["reply"], + "flows": ["egress", "ingress"], + "key_exprs": ["test/demo"], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!("Error : {:?}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; 
+ close_router_session(session).await; + } } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs index 09dd3b74eb..8d1e404617 100644 --- a/zenoh/tests/authentication.rs +++ b/zenoh/tests/authentication.rs @@ -945,7 +945,8 @@ client2name:client2passwd"; "flows": ["egress", "ingress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply", ], "key_exprs": [ "test/demo" @@ -1030,7 +1031,8 @@ client2name:client2passwd"; "flows": ["egress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply" ], "key_exprs": [ "test/demo" @@ -1256,7 +1258,8 @@ client2name:client2passwd"; "flows": ["egress", "ingress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply" ], "key_exprs": [ "test/demo" @@ -1342,7 +1345,8 @@ client2name:client2passwd"; "flows": ["egress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply" ], "key_exprs": [ "test/demo" @@ -1568,7 +1572,8 @@ client2name:client2passwd"; "flows": ["ingress", "egress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply" ], "key_exprs": [ "test/demo" @@ -1654,7 +1659,8 @@ client2name:client2passwd"; "flows": ["egress"], "messages": [ "query", - "declare_queryable" + "declare_queryable", + "reply" ], "key_exprs": [ "test/demo" From 4827f393eb591592dcd9580129af3b3e7d6577ee Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Wed, 24 Jul 2024 18:09:23 +0300 Subject: [PATCH 549/598] Make SHM sub\queryable examples more robust (#1261) * Make SHM sub\queryable examples more robust * fix clippy * fix clippy --- examples/examples/z_queryable_shm.rs | 69 ++++++++++++++++++++++++---- examples/examples/z_sub_shm.rs | 58 ++++++++++++++++++++--- 2 files changed, 111 insertions(+), 16 deletions(-) diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 75da0379e2..a5be1252a0 100644 --- 
a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::{ + bytes::ZBytes, key_expr::KeyExpr, prelude::*, shm::{ @@ -63,18 +64,29 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(query) = queryable.recv_async().await { - print!( - ">> [Queryable] Received Query '{}' ('{}'", - query.selector(), - query.key_expr().as_str(), - ); - if let Some(query_payload) = query.payload() { - match query_payload.deserialize::<&zshm>() { - Ok(p) => print!(": '{}'", String::from_utf8_lossy(p)), - Err(e) => print!(": 'Not a ShmBufInner: {:?}'", e), + // Print overall query payload information + match query.payload() { + Some(payload) => { + let (payload_type, payload) = handle_bytes(payload); + print!( + ">> [Queryable] Received Query [{}] ('{}': '{}')", + payload_type, + query.selector(), + payload + ); + } + None => { + print!(">> Received Query '{}'", query.selector()); } + }; + + // Print attachment information + if let Some(att) = query.attachment() { + let (attachment_type, attachment) = handle_bytes(att); + print!(" ({}: {})", attachment_type, attachment); } - println!(")"); + + println!(); // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example @@ -119,3 +131,40 @@ fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { let args = Args::parse(); (args.common.into(), args.key, args.payload, args.complete) } + +fn handle_bytes(bytes: &ZBytes) -> (&str, String) { + // Determine buffer type for indication purpose + let bytes_type = { + // if Zenoh is built without SHM support, the only buffer type it can receive is RAW + #[cfg(not(feature = "shared-memory"))] + { + "RAW" + } + + // if Zenoh is built with SHM support but without SHM API (that is unstable), it can + // receive buffers of any type, but there is no way to detect the buffer type + #[cfg(all(feature = "shared-memory", not(feature = "unstable")))] + { + "UNKNOWN" + } + + // if Zenoh 
is built with SHM support and with SHM API we can detect the exact buffer type + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + match bytes.deserialize::<&zshm>() { + Ok(_) => "SHM", + Err(_) => "RAW", + } + }; + + // In order to indicate the real underlying buffer type the code above is written ^^^ + // Sample is SHM-agnostic: Sample handling code works both with SHM and RAW data transparently. + // In other words, the common application compiled with "shared-memory" feature will be able to + // handle incoming SHM data without any changes in the application code. + // + // Refer to z_bytes.rs to see how to deserialize different types of message + let bytes_string = bytes + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + + (bytes_type, bytes_string) +} diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index e32c6140ac..22836921a5 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -12,7 +12,9 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::{config::Config, key_expr::KeyExpr, prelude::*, shm::zshm}; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh::shm::zshm; +use zenoh::{bytes::ZBytes, config::Config, key_expr::KeyExpr, prelude::*}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -35,16 +37,23 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { + // Print overall payload information + let (payload_type, payload) = handle_bytes(sample.payload()); print!( - ">> [Subscriber] Received {} ('{}': ", + ">> [Subscriber] Received [{}] {} ('{}': '{}')", + payload_type, sample.kind(), sample.key_expr().as_str(), + payload ); - match sample.payload().deserialize::<&zshm>() { - Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), - Err(e) => print!("'Not a ShmBufInner: {:?}'", e), + + // Print attachment information + if let Some(att) = sample.attachment() { + let 
(attachment_type, attachment) = handle_bytes(att); + print!(" ({}: {})", attachment_type, attachment); } - println!(")"); + + println!(); } // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber @@ -81,3 +90,40 @@ fn parse_args() -> (Config, KeyExpr<'static>) { let args = SubArgs::parse(); (args.common.into(), args.key) } + +fn handle_bytes(bytes: &ZBytes) -> (&str, String) { + // Determine buffer type for indication purpose + let bytes_type = { + // if Zenoh is built without SHM support, the only buffer type it can receive is RAW + #[cfg(not(feature = "shared-memory"))] + { + "RAW" + } + + // if Zenoh is built with SHM support but without SHM API (that is unstable), it can + // receive buffers of any type, but there is no way to detect the buffer type + #[cfg(all(feature = "shared-memory", not(feature = "unstable")))] + { + "UNKNOWN" + } + + // if Zenoh is built with SHM support and with SHM API we can detect the exact buffer type + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + match bytes.deserialize::<&zshm>() { + Ok(_) => "SHM", + Err(_) => "RAW", + } + }; + + // In order to indicate the real underlying buffer type the code above is written ^^^ + // Sample is SHM-agnostic: Sample handling code works both with SHM and RAW data transparently. + // In other words, the common application compiled with "shared-memory" feature will be able to + // handle incoming SHM data without any changes in the application code. 
+ // + // Refer to z_bytes.rs to see how to deserialize different types of message + let bytes_string = bytes + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + + (bytes_type, bytes_string) +} From b55c781220d7ea9f7f117570990f6e4e063e58fe Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Thu, 25 Jul 2024 12:46:26 +0300 Subject: [PATCH 550/598] SHM on RaspberryPI bugfixes (#1265) * Do not trigger transport error in case of SHM buffer invalidation * Fix spelling * Drop the whole ZBuf in case of SHM error! * fix clippy errors * Fix misaligned memory access bug (affects non-64 bit ARM) * fix tests to be platform-agnostic * Update posix_segment.rs * Update posix_segment.rs From 2e4698f85744425d7a9cbe1eb546718cc5973272 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 25 Jul 2024 17:47:01 +0200 Subject: [PATCH 551/598] Align SHM examples (#1268) --- examples/examples/z_get_shm.rs | 2 +- examples/examples/z_pub_shm.rs | 2 +- examples/examples/z_queryable_shm.rs | 8 ++++---- examples/examples/z_sub_shm.rs | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index d8ea97da33..b40834afc4 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -67,7 +67,7 @@ async fn main() { let content = payload .take() - .unwrap_or_else(|| "Get from SHM Rust!".to_string()); + .unwrap_or_else(|| "Get from Rust SHM!".to_string()); sbuf[0..content.len()].copy_from_slice(content.as_bytes()); println!("Sending Query '{selector}'..."); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 457027ba75..5a3ca9590f 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -96,7 +96,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")] /// The key expression to publish onto. 
key: KeyExpr<'static>, - #[arg(short, long, default_value = "Pub from SHM Rust!")] + #[arg(short, long, default_value = "Pub from Rust SHM!")] /// The payload of to publish. payload: String, #[command(flatten)] diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index a5be1252a0..b0b443d313 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -69,10 +69,10 @@ async fn main() { Some(payload) => { let (payload_type, payload) = handle_bytes(payload); print!( - ">> [Queryable] Received Query [{}] ('{}': '{}')", - payload_type, + ">> [Queryable] Received Query ('{}': '{}') [{}]", query.selector(), - payload + payload, + payload_type, ); } None => { @@ -117,7 +117,7 @@ struct Args { #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] /// The key expression matching queries to reply to. key: KeyExpr<'static>, - #[arg(short, long, default_value = "Queryable from SHM Rust!")] + #[arg(short, long, default_value = "Queryable from Rust SHM!")] /// The payload to reply to queries. 
payload: String, #[arg(long)] diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 22836921a5..6f9bb3f070 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -40,11 +40,11 @@ async fn main() { // Print overall payload information let (payload_type, payload) = handle_bytes(sample.payload()); print!( - ">> [Subscriber] Received [{}] {} ('{}': '{}')", - payload_type, + ">> [Subscriber] Received {} ('{}': '{}') [{}] ", sample.kind(), sample.key_expr().as_str(), - payload + payload, + payload_type, ); // Print attachment information From bc8029b067498db8c038ca59dbc2ad1fa1fa8356 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 25 Jul 2024 18:35:08 +0200 Subject: [PATCH 552/598] Fix Clippy errors from Rust 1.80 (#1270) * Resolve `clippy::doc-lazy-continuation` errors * Keep never-constructed logger interceptor * Fix `target_pointer_width` usage * Allow unexpected `doc_auto_cfg` flag * Ignore interior mutability of `Resource` * Fix typo * Upgrade `time@0.3.28` to `time@0.3.36` See https://github.com/time-rs/time/issues/693 * Fix `unused_variables` and `unused_imports` errors for `shared-memory` * Resolve more `clippy::doc-lazy-continuation` errors * Update zenoh/src/net/routing/interceptor/mod.rs --------- Co-authored-by: Luca Cominardi --- Cargo.lock | 46 +++++++++++++------ clippy.toml | 6 +++ commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs | 2 +- commons/zenoh-keyexpr/src/lib.rs | 2 +- zenoh/Cargo.toml | 3 ++ zenoh/src/api/encoding.rs | 4 -- zenoh/src/api/plugins.rs | 10 ++-- zenoh/src/api/session.rs | 4 +- zenoh/src/lib.rs | 2 +- zenoh/src/net/routing/dispatcher/resource.rs | 4 ++ zenoh/src/net/routing/interceptor/mod.rs | 4 ++ zenoh/src/net/runtime/mod.rs | 15 ++---- zenohd/src/main.rs | 5 +- 13 files changed, 67 insertions(+), 40 deletions(-) create mode 100644 clippy.toml diff --git a/Cargo.lock b/Cargo.lock index 04348c0dde..24a39b3d63 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -248,7 +248,7 @@ 
dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.28", + "time 0.3.36", ] [[package]] @@ -1081,9 +1081,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] [[package]] name = "derive-new" @@ -2093,7 +2096,7 @@ dependencies = [ "regex", "serde", "serde_json", - "time 0.3.28", + "time 0.3.36", "url", "uuid", ] @@ -2476,6 +2479,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" version = "0.1.46" @@ -2944,6 +2953,12 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -3197,7 +3212,7 @@ dependencies = [ "pem", "ring", "rustls-pki-types", - "time 0.3.28", + "time 0.3.36", "yasna", ] @@ -4338,22 +4353,24 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", + "powerfmt", "serde", "time-core", - "time-macros 0.2.14", + "time-macros 0.2.18", ] [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" @@ -4367,10 +4384,11 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -5335,7 +5353,7 @@ dependencies = [ "oid-registry", "rusticata-macros", "thiserror", - "time 0.3.28", + "time 0.3.36", ] [[package]] @@ -5344,7 +5362,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.28", + "time 0.3.36", ] [[package]] diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..49436d7ba9 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,6 @@ +# NOTE: Resources are hashed using their `.suffix` field without using any interior mutable fields. +# See https://github.com/eclipse-zenoh/zenoh/blob/b55c781220d7ea9f7f117570990f6e4e063e58fe/zenoh/src/net/routing/dispatcher/resource.rs#L193 +# A corresponding comment is present in the `Hash` implementation of `Resource` as a reminder that this configuration is set. +ignore-interior-mutability = [ + "zenoh::net::routing::dispatcher::resource::Resource", +] diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index 5bd0f7dae3..fd36e6fdcc 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -36,7 +36,7 @@ //! KeTrees come in two flavours: //! - [`KeBoxTree`] is the easier flavour. Much like a HashMap, it uniquely owns all of its nodes and data. //! 
- [`KeArcTree`] allows the shared ownership of nodes, allowing you to store subsections of the tree elsewhere -//! without worrying about lifetimes. +//! without worrying about lifetimes. //! //! # Usage //! KeTrees were designed to maximize code reuse. As such, their core properties are reflected through the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. diff --git a/commons/zenoh-keyexpr/src/lib.rs b/commons/zenoh-keyexpr/src/lib.rs index 5142076b6d..03565fe63a 100644 --- a/commons/zenoh-keyexpr/src/lib.rs +++ b/commons/zenoh-keyexpr/src/lib.rs @@ -24,7 +24,7 @@ //! - [`keyexpr`] is the equivalent of a [`str`], //! - [`OwnedKeyExpr`] works like an [`Arc`](std::sync::Arc), //! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`](std::borrow::Cow), but also stores some additional context internal to Zenoh to optimize -//! routing and network usage. +//! routing and network usage. //! //! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, //! or even if a [`keyexpr::includes`] another. 
diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 605efd16a0..968acac805 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -141,3 +141,6 @@ license-file = ["../LICENSE", "0"] depends = "zenohd (=0.11.0-dev-1), zenoh-plugin-rest (=0.11.0-dev-1), zenoh-plugin-storage-manager (=0.11.0-dev-1)" maintainer-scripts = ".deb" assets = [["../README.md", "README.md", "644"]] + +[lints.rust] +unexpected_cfgs = { level = "allow", check-cfg = ['cfg(doc_auto_cfg)'] } diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 7e86e9091a..bc335a5fc2 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -901,16 +901,12 @@ impl EncodingMapping for u128 { } impl EncodingMapping for usize { - #[cfg(target_pointer_width = "8")] - const ENCODING: Encoding = Encoding::ZENOH_UINT8; #[cfg(target_pointer_width = "16")] const ENCODING: Encoding = Encoding::ZENOH_UINT16; #[cfg(target_pointer_width = "32")] const ENCODING: Encoding = Encoding::ZENOH_UINT32; #[cfg(target_pointer_width = "64")] const ENCODING: Encoding = Encoding::ZENOH_UINT64; - #[cfg(target_pointer_width = "128")] - const ENCODING: Encoding = Encoding::ZENOH_UINT128; } // Zenoh signed integers diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 2f51f78a85..2623ce2c6f 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -94,13 +94,13 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl { /// Thus the plugin can reply its contribution to the global admin space of this zenohd. /// Parameters: /// * `key_expr`: the key_expr selector of the query. This key_expr is - /// exactly the same as it was requested by user, for example "@/ROUTER_ID/router/plugins/PLUGIN_NAME/some/plugin/info" or "@/*/router/plugins/*/foo/bar". 
- /// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the key_expr matches the `plugin_status_key` + /// exactly the same as it was requested by user, for example "@/ROUTER_ID/router/plugins/PLUGIN_NAME/some/plugin/info" or "@/*/router/plugins/*/foo/bar". + /// But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the key_expr matches the `plugin_status_key` /// * `plugin_status_key`: the actual path to plugin's status in the admin space. For example "@/ROUTER_ID/router/plugins/PLUGIN_NAME" - /// Returns value: + /// Returns value: /// * `Ok(Vec)`: the list of responses to the query. For example if plugins can return information on subleys "foo", "bar", "foo/buzz" and "bar/buzz" - /// and it's requested with the query "@/ROUTER_ID/router/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" - /// as they doesn't match the query. + /// and it's requested with the query "@/ROUTER_ID/router/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz" + /// as they doesn't match the query. /// * `Err(ZError)`: Problem occurred when processing the query. /// /// If plugin implements subplugins (as the storage plugin), then it should also reply with information about its subplugins with the same rules. 
diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 8a5d9e746e..a2745ecd96 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2033,7 +2033,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::query::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run @@ -2692,7 +2692,7 @@ pub trait SessionDeclarations<'s, 'a> { /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::query::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 86fb9e918f..02c90ce0ec 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -142,7 +142,7 @@ pub mod prelude; /// - [`keyexpr`](crate::key_expr::keyexpr) is the equivalent of a [`str`], /// - [`OwnedKeyExpr`](crate::key_expr::OwnedKeyExpr) works like an [`std::sync::Arc`], /// - [`KeyExpr`](crate::key_expr::KeyExpr) works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize -/// routing and network usage. +/// routing and network usage. /// /// All of these types [`Deref`](std::ops::Deref) to [`keyexpr`](crate::key_expr::keyexpr), which notably has methods to check whether a given [`intersects`](crate::key_expr::keyexpr::includes) with another, /// or even if a [`includes`](crate::key_expr::keyexpr::includes) another. 
diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index b829709bf2..f864c39049 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -188,6 +188,10 @@ impl PartialEq for Resource { } impl Eq for Resource {} +// NOTE: The `clippy::mutable_key_type` lint takes issue with the fact that `Resource` contains +// interior mutable data. A configuration option is used to assert that the accessed fields are +// not interior mutable in clippy.toml. Thus care should be taken to ensure soundness of this impl +// as Clippy will not warn about its usage in sets/maps. impl Hash for Resource { fn hash(&self, state: &mut H) { self.expr().hash(state); diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 3be30e9205..ba0209de2d 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -159,6 +159,7 @@ impl InterceptorTrait for ComputeOnMiss { } } +#[allow(dead_code)] pub(crate) struct IngressMsgLogger {} impl InterceptorTrait for IngressMsgLogger { @@ -186,6 +187,8 @@ impl InterceptorTrait for IngressMsgLogger { Some(ctx) } } + +#[allow(dead_code)] pub(crate) struct EgressMsgLogger {} impl InterceptorTrait for EgressMsgLogger { @@ -213,6 +216,7 @@ impl InterceptorTrait for EgressMsgLogger { } } +#[allow(dead_code)] pub(crate) struct LoggerInterceptor {} impl InterceptorFactoryTrait for LoggerInterceptor { diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index b7ba0d11da..9abb01b94e 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -146,22 +146,17 @@ impl RuntimeBuilder { runtime: std::sync::RwLock::new(WeakRuntime { state: Weak::new() }), }); - let transport_manager = TransportManager::builder() + let transport_manager_builder = TransportManager::builder() .from_config(&config) .await? 
.whatami(whatami) .zid(zid); - #[cfg(feature = "unstable")] - let transport_manager = zcondfeat!( - "shared-memory", - transport_manager.shm_reader(shm_clients.map(ShmReader::new)), - transport_manager - ) - .build(handler.clone())?; + #[cfg(feature = "shared-memory")] + let transport_manager_builder = + transport_manager_builder.shm_reader(shm_clients.map(ShmReader::new)); - #[cfg(not(feature = "unstable"))] - let transport_manager = transport_manager.build(handler.clone())?; + let transport_manager = transport_manager_builder.build(handler.clone())?; // Plugins manager #[cfg(feature = "plugins")] diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 71fa0bce34..60d898d84f 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -77,9 +77,10 @@ struct Args { /// Allows arbitrary configuration changes as column-separated KEY:VALUE pairs, where: /// - KEY must be a valid config path. /// - VALUE must be a valid JSON5 string that can be deserialized to the expected type for the KEY field. + /// /// Examples: - /// --cfg='startup/subscribe:["demo/**"]' - /// --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}' + /// - `--cfg='startup/subscribe:["demo/**"]'` + /// - `--cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` #[arg(long)] cfg: Vec, /// Configure the read and/or write permissions on the admin space. Default is read only. 
From 0c43c08d6f2a0a46a66748b6322673ed8ba4e74d Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 25 Jul 2024 18:36:14 +0200 Subject: [PATCH 553/598] Don't load plugins/volumes when `plugins_loading/enabled:false` (#1269) --- commons/zenoh-util/src/lib_loader.rs | 34 ++++++++++--------- .../zenoh-plugin-storage-manager/src/lib.rs | 4 ++- plugins/zenoh-plugin-trait/src/manager.rs | 4 +-- .../src/manager/dynamic_plugin.rs | 20 +++++++---- .../src/manager/static_plugin.rs | 4 +-- zenoh/src/net/runtime/adminspace.rs | 11 +++++- 6 files changed, 48 insertions(+), 29 deletions(-) diff --git a/commons/zenoh-util/src/lib_loader.rs b/commons/zenoh-util/src/lib_loader.rs index a2fb98da23..9d4a52c332 100644 --- a/commons/zenoh-util/src/lib_loader.rs +++ b/commons/zenoh-util/src/lib_loader.rs @@ -20,7 +20,7 @@ use std::{ use libloading::Library; use tracing::{debug, warn}; -use zenoh_core::zconfigurable; +use zenoh_core::{zconfigurable, zerror}; use zenoh_result::{bail, ZResult}; zconfigurable! { @@ -35,15 +35,13 @@ zconfigurable! { /// LibLoader allows search for libraries and to load them. #[derive(Clone, Debug)] pub struct LibLoader { - search_paths: Vec, + search_paths: Option>, } impl LibLoader { /// Return an empty `LibLoader`. pub fn empty() -> LibLoader { - LibLoader { - search_paths: Vec::new(), - } + LibLoader { search_paths: None } } /// Returns the list of search paths used by `LibLoader::default()` @@ -83,12 +81,14 @@ impl LibLoader { } } - LibLoader { search_paths } + LibLoader { + search_paths: Some(search_paths), + } } /// Return the list of search paths used by this [LibLoader] - pub fn search_paths(&self) -> &[PathBuf] { - &self.search_paths + pub fn search_paths(&self) -> Option<&[PathBuf]> { + self.search_paths.as_deref() } /// Load a library from the specified path. 
@@ -118,7 +118,7 @@ impl LibLoader { /// /// This function calls [libloading::Library::new()](https://docs.rs/libloading/0.7.0/libloading/struct.Library.html#method.new) /// which is unsafe. - pub unsafe fn search_and_load(&self, name: &str) -> ZResult<(Library, PathBuf)> { + pub unsafe fn search_and_load(&self, name: &str) -> ZResult> { let filename = format!("{}{}{}", *LIB_PREFIX, name, *LIB_SUFFIX); let filename_ostr = OsString::from(&filename); tracing::debug!( @@ -126,13 +126,16 @@ impl LibLoader { filename, self.search_paths ); - for dir in &self.search_paths { + let Some(search_paths) = self.search_paths() else { + return Ok(None); + }; + for dir in search_paths { match dir.read_dir() { Ok(read_dir) => { for entry in read_dir.flatten() { if entry.file_name() == filename_ostr { let path = entry.path(); - return Ok((Library::new(path.clone())?, path)); + return Ok(Some((Library::new(path.clone())?, path))); } } } @@ -142,7 +145,7 @@ impl LibLoader { ), } } - bail!("Library file '{}' not found", filename) + Err(zerror!("Library file '{}' not found", filename).into()) } /// Search and load all libraries with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX]. @@ -158,7 +161,7 @@ impl LibLoader { pub unsafe fn load_all_with_prefix( &self, prefix: Option<&str>, - ) -> Vec<(Library, PathBuf, String)> { + ) -> Option> { let lib_prefix = format!("{}{}", *LIB_PREFIX, prefix.unwrap_or("")); tracing::debug!( "Search for libraries {}*{} to load in {:?}", @@ -166,9 +169,8 @@ impl LibLoader { *LIB_SUFFIX, self.search_paths ); - let mut result = vec![]; - for dir in &self.search_paths { + for dir in self.search_paths()? 
{ match dir.read_dir() { Ok(read_dir) => { for entry in read_dir.flatten() { @@ -199,7 +201,7 @@ impl LibLoader { ), } } - result + Some(result) } pub fn _plugin_name(path: &std::path::Path) -> Option<&str> { diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 3c64e3fe35..3f98725a5e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -229,7 +229,9 @@ impl StorageRuntimeInner { self.plugins_manager .declare_dynamic_plugin_by_name(volume_id, backend_name, true)? }; - let loaded = declared.load()?; + let loaded = declared + .load()? + .expect("Volumes should not loaded if if the storage-manager plugin is not loaded"); loaded.start(config)?; Ok(()) diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs index 90651532ec..4776aa31a3 100644 --- a/plugins/zenoh-plugin-trait/src/manager.rs +++ b/plugins/zenoh-plugin-trait/src/manager.rs @@ -25,7 +25,7 @@ use crate::*; pub trait DeclaredPlugin: PluginStatus { fn as_status(&self) -> &dyn PluginStatus; - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin>; + fn load(&mut self) -> ZResult>>; fn loaded(&self) -> Option<&dyn LoadedPlugin>; fn loaded_mut(&mut self) -> Option<&mut dyn LoadedPlugin>; } @@ -88,7 +88,7 @@ impl DeclaredPlugin &dyn PluginStatus { self } - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin> { + fn load(&mut self) -> ZResult>> { self.0.load() } fn loaded(&self) -> Option<&dyn LoadedPlugin> { diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs index 24c873814e..50bed07a4f 100644 --- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs @@ -13,7 +13,7 @@ use std::path::{Path, PathBuf}; use libloading::Library; -use zenoh_result::{bail, ZResult}; +use zenoh_result::{bail, zerror, ZResult}; 
use zenoh_util::LibLoader; use crate::*; @@ -28,7 +28,7 @@ pub enum DynamicPluginSource { } impl DynamicPluginSource { - fn load(&self) -> ZResult<(Library, PathBuf)> { + fn load(&self) -> ZResult> { match self { DynamicPluginSource::ByName((libloader, name)) => unsafe { libloader.search_and_load(name) @@ -36,11 +36,11 @@ impl DynamicPluginSource { DynamicPluginSource::ByPaths(paths) => { for path in paths { match unsafe { LibLoader::load_file(path) } { - Ok((l, p)) => return Ok((l, p)), + Ok((l, p)) => return Ok(Some((l, p))), Err(e) => tracing::debug!("Attempt to load {} failed: {}", path, e), } } - bail!("Plugin not found in {:?}", &paths) + Err(zerror!("Plugin not found in {:?}", &paths).into()) } } } @@ -179,16 +179,22 @@ impl DeclaredPlugin &dyn PluginStatus { self } - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin> { + fn load(&mut self) -> ZResult>> { if self.starter.is_none() { - let (lib, path) = self.source.load().add_error(&mut self.report)?; + let Some((lib, path)) = self.source.load().add_error(&mut self.report)? 
else { + tracing::warn!( + "Plugin `{}` will not be loaded as plugin loading is disabled", + self.name + ); + return Ok(None); + }; let starter = DynamicPluginStarter::new(lib, path).add_error(&mut self.report)?; tracing::debug!("Plugin {} loaded from {}", self.name, starter.path()); self.starter = Some(starter); } else { tracing::warn!("Plugin `{}` already loaded", self.name); } - Ok(self) + Ok(Some(self)) } fn loaded(&self) -> Option<&dyn LoadedPlugin> { if self.starter.is_some() { diff --git a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs index 9a67d8ce16..2354a8f926 100644 --- a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs @@ -84,8 +84,8 @@ where fn as_status(&self) -> &dyn PluginStatus { self } - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin> { - Ok(self) + fn load(&mut self) -> ZResult>> { + Ok(Some(self)) } fn loaded(&self) -> Option<&dyn LoadedPlugin> { Some(self) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index d3e96b650f..6ee06f10fd 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -131,7 +131,16 @@ impl AdminSpace { ); loaded } else { - declared.load()? + match declared.load()? 
{ + Some(loaded) => loaded, + None => { + tracing::warn!( + "Plugin `{}` will not be loaded as plugin loading is disabled", + config.name + ); + return Ok(()); + } + } }; if let Some(started) = loaded.started_mut() { From 7a9e50ba42b102f3f23337f4ae685313b0d11874 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 25 Jul 2024 18:15:45 +0200 Subject: [PATCH 554/598] Fix failover brokering bug reacting to linkstate changes --- zenoh/src/net/routing/hat/router/pubsub.rs | 146 ++++++++++----------- 1 file changed, 67 insertions(+), 79 deletions(-) diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 948c9eabb7..40d1836e58 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -839,87 +839,75 @@ pub(super) fn pubsub_linkstate_change( links: &[ZenohIdProto], send_declare: &mut SendDeclare, ) { - if let Some(src_face) = tables.get_face(zid).cloned() { + if let Some(mut src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in face_hat!(src_face).remote_subs.values() { - let client_subs = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); - if !remote_router_subs(tables, res) && !client_subs { - for ctx in get_mut_unchecked(&mut res.clone()) + let to_forget = face_hat!(src_face) + .local_subs + .keys() + .filter(|res| { + let client_subs = res .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami 
== WhatAmI::Peer - && ctx2.subs.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }, - ), - }, - res.expr(), - ), - ); - - face_hat_mut!(dst_face).local_subs.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber( - DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }, - ), - }, - res.expr(), - ), - ); - } + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); + !remote_router_subs(tables, res) + && !client_subs + && !res.session_ctxs.values().any(|ctx| { + ctx.face.whatami == WhatAmI::Peer + && src_face.zid != ctx.face.zid + && HatTables::failover_brokering_to(links, ctx.face.zid) + }) + }) + .cloned() + .collect::>>(); + for res in to_forget { + if let Some(id) = face_hat_mut!(&mut src_face).local_subs.remove(&res) { + let wire_expr = Resource::get_best_key(&res, "", src_face.id); + send_declare( + &src_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: 
ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); + } + } + + for dst_face in tables.faces.values_mut() { + if HatTables::failover_brokering_to(links, dst_face.zid) { + for res in face_hat!(src_face).remote_subs.values() { + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); } } } From f7486160c01514d601186cb234f6edf49c76de63 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Jul 2024 11:40:37 +0200 Subject: [PATCH 555/598] Update DEFAULT_CONFIG.json5 --- DEFAULT_CONFIG.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 2dfb5b2fff..e66dea1d37 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -106,7 +106,7 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. /// Accepts a single value (e.g. autoconnect: ["router", "peer"]) /// or different values for router, peer and client (e.g. autoconnect: { router: [], peer: ["router", "peer"] }). - /// Each value is bit-or-like combinations of "peer", "router" and "client". 
+ /// Each value is a list of: "peer", "router" and/or "client". autoconnect: { router: [], peer: ["router", "peer"] }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: true, From 3afd5fa786fc3485adb0255db20469def763c020 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Jul 2024 15:25:17 +0200 Subject: [PATCH 556/598] Update DEFAULT_CONFIG.json5 --- DEFAULT_CONFIG.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index e66dea1d37..de5baa4725 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -124,7 +124,7 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. /// Accepts a single value (e.g. autoconnect: ["router", "peer"]) /// or different values for router, peer and client (e.g. autoconnect: { router: [], peer: ["router", "peer"] }). - /// Each value is bit-or-like combinations of "peer", "router" and "client". + /// Each value is a list of: "peer", "router" and/or "client". 
autoconnect: { router: [], peer: ["router", "peer"] }, }, }, From 32bdded320fbf67a9f64aaae3b8e6212b544d5ce Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 26 Jul 2024 16:09:09 +0200 Subject: [PATCH 557/598] Don't duplicate token undeclarations in routers (#1275) * Don't duplicate token undeclarations in routers * Fix brokered liveliness tests * Address review comments * Remove `dbg!` --- zenoh/src/net/routing/hat/router/token.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index 06d3a4b14f..644932446c 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -386,6 +386,7 @@ fn send_forget_sourced_token_to_net_clildren( fn propagate_forget_simple_token( tables: &mut Tables, res: &Arc, + src_face: Option<&Arc>, send_declare: &mut SendDeclare, ) { for mut face in tables.faces.values().cloned() { @@ -406,9 +407,15 @@ fn propagate_forget_simple_token( res.expr(), ), ); - } else if face_hat!(face).remote_interests.values().any(|(r, o)| { - o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() - }) { + // NOTE(fuzzypixelz): We need to check that `face` is not the source Face of the token + // undeclaration, otherwise the undeclaration would be duplicated at the source Face. In + // cases where we don't have access to a Face as we didnt't receive an undeclaration and we + // default to true. + } else if src_face.map_or(true, |src_face| src_face.id != face.id) + && face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() + }) + { // Token has never been declared on this face. // Send an Undeclare with a one shot generated id and a WireExpr ext. 
send_declare( @@ -578,6 +585,7 @@ fn propagate_forget_sourced_token( fn unregister_router_token( tables: &mut Tables, + face: Option<&Arc>, res: &mut Arc, router: &ZenohIdProto, send_declare: &mut SendDeclare, @@ -594,7 +602,7 @@ fn unregister_router_token( if hat_mut!(tables).full_net(WhatAmI::Peer) { undeclare_peer_token(tables, None, res, &tables.zid.clone()); } - propagate_forget_simple_token(tables, res, send_declare); + propagate_forget_simple_token(tables, res, face, send_declare); } propagate_forget_simple_token_to_peers(tables, res, send_declare); @@ -608,7 +616,7 @@ fn undeclare_router_token( send_declare: &mut SendDeclare, ) { if res_hat!(res).router_tokens.contains(router) { - unregister_router_token(tables, res, router, send_declare); + unregister_router_token(tables, face, res, router, send_declare); propagate_forget_sourced_token(tables, res, face, router, WhatAmI::Router); } } @@ -680,7 +688,7 @@ pub(super) fn undeclare_client_token( let router_tokens = remote_router_tokens(tables, res); let peer_tokens = remote_peer_tokens(tables, res); if client_tokens.is_empty() && !peer_tokens { - undeclare_router_token(tables, None, res, &tables.zid.clone(), send_declare); + undeclare_router_token(tables, Some(face), res, &tables.zid.clone(), send_declare); } else { propagate_forget_simple_token_to_peers(tables, res, send_declare); } @@ -774,7 +782,7 @@ pub(super) fn token_remove_node( .cloned() .collect::>>() { - unregister_router_token(tables, &mut res, node, send_declare); + unregister_router_token(tables, None, &mut res, node, send_declare); Resource::clean(&mut res) } } From e38fc16b9988c5c6add6d2e19667b6badc86993d Mon Sep 17 00:00:00 2001 From: oteffahi <70609372+oteffahi@users.noreply.github.com> Date: Fri, 26 Jul 2024 16:31:52 +0200 Subject: [PATCH 558/598] Optimize RoutingContext keyexpr for Query and Response messages (#1266) * Optimize RoutingContext keyexpr for Query and Response messages Based on existing Push message logic * Update Reply(egress) 
keyexpr logic in ACL interceptor * Fix parameter names * Change auth lowlatency test to TLS in order to dodge issue with Quic * Optimize ResponseFinal RoutingContext, set its RoutingContext key_expr to None in all cases --- zenoh/src/api/session.rs | 12 +- zenoh/src/net/primitives/mod.rs | 12 +- zenoh/src/net/primitives/mux.rs | 246 +++++++++--------- zenoh/src/net/routing/dispatcher/queries.rs | 111 +++----- .../net/routing/interceptor/access_control.rs | 7 +- zenoh/src/net/runtime/adminspace.rs | 12 +- zenoh/src/net/tests/tables.rs | 10 +- zenoh/tests/authentication.rs | 92 +++---- 8 files changed, 233 insertions(+), 269 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a2745ecd96..4ca924e023 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2799,18 +2799,18 @@ impl crate::net::primitives::EPrimitives for Session { } #[inline] - fn send_request(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_request(ctx.msg) + fn send_request(&self, msg: Request) { + (self as &dyn Primitives).send_request(msg) } #[inline] - fn send_response(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response(ctx.msg) + fn send_response(&self, msg: Response) { + (self as &dyn Primitives).send_response(msg) } #[inline] - fn send_response_final(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response_final(ctx.msg) + fn send_response_final(&self, msg: ResponseFinal) { + (self as &dyn Primitives).send_response_final(msg) } fn as_any(&self) -> &dyn std::any::Any { diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index 73768a4cca..837571f7f6 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -49,11 +49,11 @@ pub(crate) trait EPrimitives: Send + Sync { fn send_push(&self, msg: Push); - fn send_request(&self, ctx: RoutingContext); + fn send_request(&self, msg: Request); - 
fn send_response(&self, ctx: RoutingContext); + fn send_response(&self, msg: Response); - fn send_response_final(&self, ctx: RoutingContext); + fn send_response_final(&self, msg: ResponseFinal); } #[derive(Default)] @@ -82,11 +82,11 @@ impl EPrimitives for DummyPrimitives { fn send_push(&self, _msg: Push) {} - fn send_request(&self, _ctx: RoutingContext) {} + fn send_request(&self, _msg: Request) {} - fn send_response(&self, _ctx: RoutingContext) {} + fn send_response(&self, _msg: Response) {} - fn send_response_final(&self, _ctx: RoutingContext) {} + fn send_response_final(&self, _msg: ResponseFinal) {} fn as_any(&self) -> &dyn Any { self diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 4627017a72..bc718ba324 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -271,78 +271,75 @@ impl EPrimitives for Mux { } } - fn send_request(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Request(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_request(&self, msg: Request) { + let msg = NetworkMessage { + body: NetworkBody::Request(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = 
prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Response(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response(&self, msg: Response) { + let msg = NetworkMessage { + body: NetworkBody::Response(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response_final(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::ResponseFinal(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response_final(&self, msg: ResponseFinal) { + let msg = NetworkMessage { + body: NetworkBody::ResponseFinal(msg), + 
#[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } @@ -592,78 +589,75 @@ impl EPrimitives for McastMux { } } - fn send_request(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Request(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_request(&self, msg: Request) { + let msg = NetworkMessage { + body: NetworkBody::Request(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| 
(!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Response(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response(&self, msg: Response) { + let msg = NetworkMessage { + body: NetworkBody::Response(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response_final(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::ResponseFinal(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response_final(&self, msg: ResponseFinal) { + let msg = NetworkMessage { + 
body: NetworkBody::ResponseFinal(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 445f138d8d..c117bd51df 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -43,10 +43,7 @@ use super::{ resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}, tables::{NodeId, RoutingExpr, Tables, TablesLock}, }; -use crate::net::routing::{ - hat::{HatTrait, SendDeclare}, - RoutingContext, -}; +use crate::net::routing::hat::{HatTrait, SendDeclare}; pub(crate) struct Query { src_face: Arc, @@ -600,16 +597,11 @@ pub fn route_query( face, qid ); - face.primitives - .clone() - .send_response_final(RoutingContext::with_expr( - ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }, - expr.full_expr().to_string(), - )); + face.primitives.clone().send_response_final(ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); } else { for ((outface, key_expr, context), 
qid) in route.values() { QueryCleanup::spawn_query_clean_up_task(outface, tables_ref, *qid, timeout); @@ -621,35 +613,27 @@ pub fn route_query( } tracing::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(RoutingContext::with_expr( - Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target, - ext_budget, - ext_timeout, - payload: body.clone(), - }, - expr.full_expr().to_string(), - )); + outface.primitives.send_request(Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp, + ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_target, + ext_budget, + ext_timeout, + payload: body.clone(), + }); } } } else { tracing::debug!("Send final reply {}:{} (not master)", face, qid); drop(rtables); - face.primitives - .clone() - .send_response_final(RoutingContext::with_expr( - ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }, - expr.full_expr().to_string(), - )); + face.primitives.clone().send_response_final(ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); } } None => { @@ -659,16 +643,11 @@ pub fn route_query( expr.scope, ); drop(rtables); - face.primitives - .clone() - .send_response_final(RoutingContext::with_expr( - ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }, - "".to_string(), - )); + face.primitives.clone().send_response_final(ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); } } } @@ -705,21 +684,14 @@ pub(crate) fn route_send_response( inc_res_stats!(query.src_face, tx, admin, body) } - query - .src_face - .primitives - .clone() - .send_response(RoutingContext::with_expr( - Response { - rid: query.src_qid, - wire_expr: key_expr.to_owned(), - payload: body, - ext_qos, - ext_tstamp, - ext_respid, - 
}, - "".to_string(), // @TODO provide the proper key expression of the response for interceptors - )); + query.src_face.primitives.send_response(Response { + rid: query.src_qid, + wire_expr: key_expr.to_owned(), + payload: body, + ext_qos, + ext_tstamp, + ext_respid, + }); } None => tracing::warn!( "Route reply {}:{} from {}: Query not found!", @@ -773,13 +745,10 @@ pub(crate) fn finalize_pending_query(query: (Arc, CancellationToken)) { .src_face .primitives .clone() - .send_response_final(RoutingContext::with_expr( - ResponseFinal { - rid: query.src_qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }, - "".to_string(), - )); + .send_response_final(ResponseFinal { + rid: query.src_qid, + ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); } } diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 6af064a878..839f18bd07 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -360,11 +360,8 @@ impl InterceptorTrait for EgressAclEnforcer { return None; } } - NetworkBody::Response(Response { wire_expr, .. }) => { - // @TODO: Remove wire_expr usage when issue #1255 is implemented - if self.action(AclMessage::Reply, "Reply (egress)", wire_expr.as_str()) - == Permission::Deny - { + NetworkBody::Response(Response { .. }) => { + if self.action(AclMessage::Reply, "Reply (egress)", key_expr?) 
== Permission::Deny { return None; } } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 6ee06f10fd..ce87d68ef0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -521,18 +521,18 @@ impl crate::net::primitives::EPrimitives for AdminSpace { } #[inline] - fn send_request(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_request(ctx.msg) + fn send_request(&self, msg: Request) { + (self as &dyn Primitives).send_request(msg) } #[inline] - fn send_response(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response(ctx.msg) + fn send_response(&self, msg: Response) { + (self as &dyn Primitives).send_response(msg) } #[inline] - fn send_response_final(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response_final(ctx.msg) + fn send_response_final(&self, msg: ResponseFinal) { + (self as &dyn Primitives).send_response_final(msg) } fn as_any(&self) -> &dyn std::any::Any { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 8ef9294edc..5fd8a49261 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -567,11 +567,15 @@ impl EPrimitives for ClientPrimitives { *zlock!(self.data) = Some(msg.wire_expr.to_owned()); } - fn send_request(&self, _ctx: RoutingContext) {} + fn send_request(&self, msg: zenoh_protocol::network::Request) { + *zlock!(self.data) = Some(msg.wire_expr.to_owned()); + } - fn send_response(&self, _ctx: RoutingContext) {} + fn send_response(&self, msg: zenoh_protocol::network::Response) { + *zlock!(self.data) = Some(msg.wire_expr.to_owned()); + } - fn send_response_final(&self, _ctx: RoutingContext) {} + fn send_response_final(&self, _msg: zenoh_protocol::network::ResponseFinal) {} fn as_any(&self) -> &dyn std::any::Any { self diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs index 
8d1e404617..fcd448f7d1 100644 --- a/zenoh/tests/authentication.rs +++ b/zenoh/tests/authentication.rs @@ -54,7 +54,7 @@ mod test { create_new_files(TESTFILES_PATH.to_path_buf()) .await .unwrap(); - test_pub_sub_deny_then_allow_tls(37448).await; + test_pub_sub_deny_then_allow_tls(37448, false).await; test_pub_sub_allow_then_deny_tls(37449).await; test_get_qbl_allow_then_deny_tls(37450).await; test_get_qbl_deny_then_allow_tls(37451).await; @@ -66,7 +66,7 @@ mod test { create_new_files(TESTFILES_PATH.to_path_buf()) .await .unwrap(); - test_pub_sub_deny_then_allow_quic(37452, false).await; + test_pub_sub_deny_then_allow_quic(37452).await; test_pub_sub_allow_then_deny_quic(37453).await; test_get_qbl_deny_then_allow_quic(37454).await; test_get_qbl_allow_then_deny_quic(37455).await; @@ -79,7 +79,7 @@ mod test { create_new_files(TESTFILES_PATH.to_path_buf()) .await .unwrap(); - test_pub_sub_deny_then_allow_quic(37456, true).await; + test_pub_sub_deny_then_allow_tls(37456, true).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -269,7 +269,7 @@ client2name:client2passwd"; Ok(()) } - async fn get_basic_router_config_tls(port: u16) -> Config { + async fn get_basic_router_config_tls(port: u16, lowlatency: bool) -> Config { let cert_path = TESTFILES_PATH.to_string_lossy(); let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -313,9 +313,16 @@ client2name:client2passwd"; .tls .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); config } - async fn get_basic_router_config_quic(port: u16, lowlatency: bool) -> Config { + async fn get_basic_router_config_quic(port: u16) -> Config { let cert_path = TESTFILES_PATH.to_string_lossy(); let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -359,13 +366,6 @@ client2name:client2passwd"; 
.tls .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) .unwrap(); - config.transport.unicast.set_lowlatency(lowlatency).unwrap(); - config - .transport - .unicast - .qos - .set_enabled(!lowlatency) - .unwrap(); config } @@ -472,7 +472,7 @@ client2name:client2passwd"; config } - async fn get_client_sessions_tls(port: u16) -> (Session, Session) { + async fn get_client_sessions_tls(port: u16, lowlatency: bool) -> (Session, Session) { let cert_path = TESTFILES_PATH.to_string_lossy(); println!("Opening client sessions"); let mut config = config::client([format!("tls/127.0.0.1:{}", port) @@ -512,6 +512,13 @@ client2name:client2passwd"; .tls .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); let s01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::client([format!("tls/127.0.0.1:{}", port) @@ -551,11 +558,18 @@ client2name:client2passwd"; .tls .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } - async fn get_client_sessions_quic(port: u16, lowlatency: bool) -> (Session, Session) { + async fn get_client_sessions_quic(port: u16) -> (Session, Session) { let cert_path = TESTFILES_PATH.to_string_lossy(); println!("Opening client sessions"); let mut config = config::client([format!("quic/127.0.0.1:{}", port) @@ -595,13 +609,6 @@ client2name:client2passwd"; .tls .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) .unwrap(); - config.transport.unicast.set_lowlatency(lowlatency).unwrap(); - config - .transport - .unicast - .qos - .set_enabled(!lowlatency) - .unwrap(); let s01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = 
config::client([format!("quic/127.0.0.1:{}", port) .parse::() @@ -640,13 +647,6 @@ client2name:client2passwd"; .tls .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) .unwrap(); - config.transport.unicast.set_lowlatency(lowlatency).unwrap(); - config - .transport - .unicast - .qos - .set_enabled(!lowlatency) - .unwrap(); let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } @@ -789,10 +789,10 @@ client2name:client2passwd"; ztimeout!(s02.close()).unwrap(); } - async fn test_pub_sub_deny_then_allow_tls(port: u16) { + async fn test_pub_sub_deny_then_allow_tls(port: u16, lowlatency: bool) { println!("test_pub_sub_deny_then_allow_tls"); - let mut config_router = get_basic_router_config_tls(port).await; + let mut config_router = get_basic_router_config_tls(port, lowlatency).await; config_router .insert_json5( @@ -835,7 +835,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_tls(port).await; + let (sub_session, pub_session) = get_client_sessions_tls(port, lowlatency).await; { let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -861,7 +861,7 @@ client2name:client2passwd"; async fn test_pub_sub_allow_then_deny_tls(port: u16) { println!("test_pub_sub_allow_then_deny_tls"); - let mut config_router = get_basic_router_config_tls(port).await; + let mut config_router = get_basic_router_config_tls(port, false).await; config_router .insert_json5( "access_control", @@ -902,7 +902,7 @@ client2name:client2passwd"; println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_tls(port).await; + let (sub_session, pub_session) = get_client_sessions_tls(port, false).await; { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = 
Arc::new(Mutex::new(String::new())); @@ -931,7 +931,7 @@ client2name:client2passwd"; async fn test_get_qbl_deny_then_allow_tls(port: u16) { println!("test_get_qbl_deny_then_allow_tls"); - let mut config_router = get_basic_router_config_tls(port).await; + let mut config_router = get_basic_router_config_tls(port, false).await; config_router .insert_json5( "access_control", @@ -975,7 +975,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions_tls(port).await; + let (get_session, qbl_session) = get_client_sessions_tls(port, false).await; { let mut received_value = String::new(); @@ -1017,7 +1017,7 @@ client2name:client2passwd"; async fn test_get_qbl_allow_then_deny_tls(port: u16) { println!("test_get_qbl_allow_then_deny_tls"); - let mut config_router = get_basic_router_config_tls(port).await; + let mut config_router = get_basic_router_config_tls(port, false).await; config_router .insert_json5( "access_control", @@ -1060,7 +1060,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions_tls(port).await; + let (get_session, qbl_session) = get_client_sessions_tls(port, false).await; { let mut received_value = String::new(); @@ -1099,10 +1099,10 @@ client2name:client2passwd"; close_router_session(session).await; } - async fn test_pub_sub_deny_then_allow_quic(port: u16, lowlatency: bool) { + async fn test_pub_sub_deny_then_allow_quic(port: u16) { println!("test_pub_sub_deny_then_allow_quic"); - let mut config_router = get_basic_router_config_quic(port, lowlatency).await; + let mut config_router = get_basic_router_config_quic(port).await; config_router .insert_json5( @@ -1145,7 +1145,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_quic(port, lowlatency).await; + let (sub_session, pub_session) 
= get_client_sessions_quic(port).await; { let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -1173,7 +1173,7 @@ client2name:client2passwd"; async fn test_pub_sub_allow_then_deny_quic(port: u16) { println!("test_pub_sub_allow_then_deny_quic"); - let mut config_router = get_basic_router_config_quic(port, false).await; + let mut config_router = get_basic_router_config_quic(port).await; config_router .insert_json5( "access_control", @@ -1214,7 +1214,7 @@ client2name:client2passwd"; println!("Opening router session"); let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions_quic(port, false).await; + let (sub_session, pub_session) = get_client_sessions_quic(port).await; { let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); @@ -1244,7 +1244,7 @@ client2name:client2passwd"; async fn test_get_qbl_deny_then_allow_quic(port: u16) { println!("test_get_qbl_deny_then_allow_quic"); - let mut config_router = get_basic_router_config_quic(port, false).await; + let mut config_router = get_basic_router_config_quic(port).await; config_router .insert_json5( "access_control", @@ -1288,7 +1288,7 @@ client2name:client2passwd"; let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions_quic(port, false).await; + let (get_session, qbl_session) = get_client_sessions_quic(port).await; { let mut received_value = String::new(); @@ -1331,7 +1331,7 @@ client2name:client2passwd"; async fn test_get_qbl_allow_then_deny_quic(port: u16) { println!("test_get_qbl_allow_then_deny_quic"); - let mut config_router = get_basic_router_config_quic(port, false).await; + let mut config_router = get_basic_router_config_quic(port).await; config_router .insert_json5( "access_control", @@ -1374,7 +1374,7 @@ client2name:client2passwd"; let 
session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions_quic(port, false).await; + let (get_session, qbl_session) = get_client_sessions_quic(port).await; { let mut received_value = String::new(); From 86f0848b6fb5d950d50429644e95fe4b71b21a19 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 29 Jul 2024 14:18:56 +0200 Subject: [PATCH 559/598] Renaming (#1276) --- zenoh/src/net/routing/hat/client/mod.rs | 12 +- zenoh/src/net/routing/hat/client/pubsub.rs | 26 +-- zenoh/src/net/routing/hat/client/queries.rs | 26 +-- zenoh/src/net/routing/hat/client/token.rs | 28 ++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 76 ++++----- .../net/routing/hat/linkstate_peer/pubsub.rs | 119 +++++++------- .../net/routing/hat/linkstate_peer/queries.rs | 129 ++++++++------- .../net/routing/hat/linkstate_peer/token.rs | 112 +++++++------ zenoh/src/net/routing/hat/p2p_peer/mod.rs | 12 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 16 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 32 ++-- zenoh/src/net/routing/hat/p2p_peer/token.rs | 34 ++-- zenoh/src/net/routing/hat/router/mod.rs | 102 ++++++------ zenoh/src/net/routing/hat/router/pubsub.rs | 139 +++++++++------- zenoh/src/net/routing/hat/router/queries.rs | 153 +++++++++++------- zenoh/src/net/routing/hat/router/token.rs | 128 ++++++++------- 16 files changed, 622 insertions(+), 522 deletions(-) diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 230449bb9f..a1a1eb08d1 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -23,7 +23,7 @@ use std::{ sync::{atomic::AtomicU32, Arc}, }; -use token::{token_new_face, undeclare_client_token}; +use token::{token_new_face, undeclare_simple_token}; use zenoh_config::WhatAmI; use zenoh_protocol::network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId, TokenId}, @@ -36,8 +36,8 @@ use 
zenoh_transport::unicast::TransportUnicast; use self::{ interests::interests_new_face, - pubsub::{pubsub_new_face, undeclare_client_subscription}, - queries::{queries_new_face, undeclare_client_queryable}, + pubsub::{pubsub_new_face, undeclare_simple_subscription}, + queries::{queries_new_face, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -163,7 +163,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -185,7 +185,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -206,7 +206,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 886333f92c..9d7760247e 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -101,7 +101,7 @@ fn propagate_simple_subscription( } } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc, id: SubscriberId, @@ 
-129,7 +129,7 @@ fn register_client_subscription( face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, @@ -137,7 +137,7 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_simple_subscription(tables, face, id, res, sub_info); propagate_simple_subscription(tables, res, sub_info, face, send_declare); // This introduced a buffer overflow on windows @@ -164,7 +164,7 @@ fn declare_client_subscription( } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -204,7 +204,7 @@ fn propagate_forget_simple_subscription( } } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -215,12 +215,12 @@ pub(super) fn undeclare_client_subscription( get_mut_unchecked(ctx).subs = None; } - let mut client_subs = client_subs(res); - if client_subs.is_empty() { + let mut simple_subs = simple_subs(res); + if simple_subs.is_empty() { propagate_forget_simple_subscription(tables, res, send_declare); } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; + if simple_subs.len() == 1 { + let face = &mut simple_subs[0]; if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { send_declare( &face.primitives, @@ -243,14 +243,14 @@ pub(super) fn undeclare_client_subscription( } } -fn forget_client_subscription( +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res, send_declare); + undeclare_simple_subscription(tables, face, &mut res, send_declare); Some(res) } else { 
None @@ -297,7 +297,7 @@ impl HatPubSubTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, id, res, sub_info, send_declare); + declare_simple_subscription(tables, face, id, res, sub_info, send_declare); } fn undeclare_subscription( @@ -309,7 +309,7 @@ impl HatPubSubTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) -> Option> { - forget_client_subscription(tables, face, id, send_declare) + forget_simple_subscription(tables, face, id, send_declare) } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 12e594500f..0c394da851 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -127,7 +127,7 @@ fn propagate_simple_queryable( } } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -147,7 +147,7 @@ fn register_client_queryable( face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -155,12 +155,12 @@ fn declare_client_queryable( qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -200,7 +200,7 @@ fn propagate_forget_simple_queryable( } } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -215,14 +215,14 @@ pub(super) fn undeclare_client_queryable( get_mut_unchecked(ctx).qabl = None; } - let 
mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { + let mut simple_qabls = simple_qabls(res); + if simple_qabls.is_empty() { propagate_forget_simple_queryable(tables, res, send_declare); } else { propagate_simple_queryable(tables, res, None, send_declare); } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; + if simple_qabls.len() == 1 { + let face = &mut simple_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( &face.primitives, @@ -245,14 +245,14 @@ pub(super) fn undeclare_client_queryable( } } -fn forget_client_queryable( +fn forget_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res, send_declare); + undeclare_simple_queryable(tables, face, &mut res, send_declare); Some(res) } else { None @@ -291,7 +291,7 @@ impl HatQueriesTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, id, res, qabl_info, send_declare); + declare_simple_queryable(tables, face, id, res, qabl_info, send_declare); } fn undeclare_queryable( @@ -303,7 +303,7 @@ impl HatQueriesTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) -> Option> { - forget_client_queryable(tables, face, id, send_declare) + forget_simple_queryable(tables, face, id, send_declare) } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/client/token.rs b/zenoh/src/net/routing/hat/client/token.rs index 58af466f00..11fab10466 100644 --- a/zenoh/src/net/routing/hat/client/token.rs +++ b/zenoh/src/net/routing/hat/client/token.rs @@ -81,7 +81,7 @@ fn propagate_simple_token( } } -fn register_client_token( +fn register_simple_token( _tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -108,7 +108,7 @@ fn register_client_token( 
face_hat_mut!(face).remote_tokens.insert(id, res.clone()); } -fn declare_client_token( +fn declare_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -116,7 +116,7 @@ fn declare_client_token( interest_id: Option, send_declare: &mut SendDeclare, ) { - register_client_token(tables, face, id, res); + register_simple_token(tables, face, id, res); propagate_simple_token(tables, res, face, send_declare); @@ -141,7 +141,7 @@ fn declare_client_token( } #[inline] -fn client_tokens(res: &Arc) -> Vec> { +fn simple_tokens(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -206,7 +206,7 @@ fn propagate_forget_simple_token( } } -pub(super) fn undeclare_client_token( +pub(super) fn undeclare_simple_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -221,12 +221,12 @@ pub(super) fn undeclare_client_token( get_mut_unchecked(ctx).token = false; } - let mut client_tokens = client_tokens(res); - if client_tokens.is_empty() { + let mut simple_tokens = simple_tokens(res); + if simple_tokens.is_empty() { propagate_forget_simple_token(tables, res, send_declare); } - if client_tokens.len() == 1 { - let face = &mut client_tokens[0]; + if simple_tokens.len() == 1 { + let face = &mut simple_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { send_declare( @@ -251,7 +251,7 @@ pub(super) fn undeclare_client_token( } } -fn forget_client_token( +fn forget_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -259,10 +259,10 @@ fn forget_client_token( send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res, send_declare); + undeclare_simple_token(tables, face, &mut res, send_declare); Some(res) } else if let Some(mut res) = res { - undeclare_client_token(tables, face, &mut res, send_declare); + undeclare_simple_token(tables, face, &mut res, send_declare); Some(res) } 
else { None @@ -410,7 +410,7 @@ impl HatTokenTrait for HatCode { interest_id: Option, send_declare: &mut SendDeclare, ) { - declare_client_token(tables, face, id, res, interest_id, send_declare); + declare_simple_token(tables, face, id, res, interest_id, send_declare); } fn undeclare_token( @@ -422,6 +422,6 @@ impl HatTokenTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) -> Option> { - forget_client_token(tables, face, id, res, send_declare) + forget_simple_token(tables, face, id, res, send_declare) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 1dd4e65318..ded87f18ee 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -24,7 +24,7 @@ use std::{ time::Duration, }; -use token::{token_remove_node, undeclare_client_token}; +use token::{token_remove_node, undeclare_simple_token}; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, @@ -43,8 +43,8 @@ use zenoh_transport::unicast::TransportUnicast; use self::{ network::Network, - pubsub::{pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_remove_node, undeclare_client_queryable}, + pubsub::{pubsub_remove_node, undeclare_simple_subscription}, + queries::{queries_remove_node, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -117,17 +117,17 @@ macro_rules! 
face_hat_mut { use face_hat_mut; struct HatTables { - peer_subs: HashSet>, - peer_tokens: HashSet>, - peer_qabls: HashSet>, - peers_net: Option, - peers_trees_task: Option, + linkstatepeer_subs: HashSet>, + linkstatepeer_tokens: HashSet>, + linkstatepeer_qabls: HashSet>, + linkstatepeers_net: Option, + linkstatepeers_trees_task: Option, } impl Drop for HatTables { fn drop(&mut self) { - if self.peers_trees_task.is_some() { - let task = self.peers_trees_task.take().unwrap(); + if self.linkstatepeers_trees_task.is_some() { + let task = self.linkstatepeers_trees_task.take().unwrap(); task.terminate(Duration::from_secs(10)); } } @@ -136,16 +136,16 @@ impl Drop for HatTables { impl HatTables { fn new() -> Self { Self { - peer_subs: HashSet::new(), - peer_tokens: HashSet::new(), - peer_qabls: HashSet::new(), - peers_net: None, - peers_trees_task: None, + linkstatepeer_subs: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), + linkstatepeer_qabls: HashSet::new(), + linkstatepeers_net: None, + linkstatepeers_trees_task: None, } } fn schedule_compute_trees(&mut self, tables_ref: Arc) { - if self.peers_trees_task.is_none() { + if self.linkstatepeers_trees_task.is_none() { let task = TerminatableTask::spawn( zenoh_runtime::ZRuntime::Net, async move { @@ -156,7 +156,11 @@ impl HatTables { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); - let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + let new_children = hat_mut!(tables) + .linkstatepeers_net + .as_mut() + .unwrap() + .compute_trees(); tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_children); @@ -164,11 +168,11 @@ impl HatTables { token::token_tree_change(&mut tables, &new_children); tracing::trace!("Computations completed"); - hat_mut!(tables).peers_trees_task = None; + hat_mut!(tables).linkstatepeers_trees_task = None; }, TerminatableTask::create_cancellation_token(), ); - self.peers_trees_task = Some(task); + 
self.linkstatepeers_trees_task = Some(task); } } } @@ -193,7 +197,7 @@ impl HatBaseTrait for HatCode { unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); - hat_mut!(tables).peers_net = Some(Network::new( + hat_mut!(tables).linkstatepeers_net = Some(Network::new( "[Peers network]".to_string(), tables.zid, runtime, @@ -237,7 +241,7 @@ impl HatBaseTrait for HatCode { _send_declare: &mut SendDeclare, ) -> ZResult<()> { let link_id = if face.state.whatami != WhatAmI::Client { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { net.add_link(transport.clone()) } else { 0 @@ -290,7 +294,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -312,7 +316,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -333,7 +337,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); @@ -385,7 +389,7 @@ impl HatBaseTrait for HatCode { let whatami = transport.get_whatami()?; if whatami 
!= WhatAmI::Client { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { let changes = net.link_states(list.link_states, zid); for (_, removed_node) in changes.removed_nodes { @@ -412,7 +416,7 @@ impl HatBaseTrait for HatCode { routing_context: NodeId, ) -> NodeId { hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_local_context(routing_context, face_hat!(face).link_id) @@ -429,7 +433,7 @@ impl HatBaseTrait for HatCode { (Ok(zid), Ok(whatami)) => { if whatami != WhatAmI::Client { for (_, removed_node) in hat_mut!(tables) - .peers_net + .linkstatepeers_net .as_mut() .unwrap() .remove_link(&zid) @@ -470,7 +474,7 @@ impl HatBaseTrait for HatCode { fn info(&self, tables: &Tables, kind: WhatAmI) -> String { match kind { WhatAmI::Peer => hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.dot()) .unwrap_or_else(|| "graph {}".to_string()), @@ -480,17 +484,17 @@ impl HatBaseTrait for HatCode { } struct HatContext { - peer_subs: HashSet, - peer_qabls: HashMap, - peer_tokens: HashSet, + linkstatepeer_subs: HashSet, + linkstatepeer_qabls: HashMap, + linkstatepeer_tokens: HashSet, } impl HatContext { fn new() -> Self { Self { - peer_subs: HashSet::new(), - peer_qabls: HashMap::new(), - peer_tokens: HashSet::new(), + linkstatepeer_subs: HashSet::new(), + linkstatepeer_qabls: HashMap::new(), + linkstatepeer_tokens: HashSet::new(), } } } @@ -525,7 +529,7 @@ impl HatFace { fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_link(face_hat!(face).link_id) @@ -555,7 +559,7 @@ impl HatTrait for HatCode {} #[inline] fn get_routes_entries(tables: &Tables) -> RoutesIndexes { let indexes = hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .graph diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs 
index 849921d24b..8b9d97872b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -200,7 +200,7 @@ fn propagate_sourced_subscription( src_face: Option<&Arc>, source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -230,7 +230,7 @@ fn propagate_sourced_subscription( } } -fn register_peer_subscription( +fn register_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -238,11 +238,11 @@ fn register_peer_subscription( peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - if !res_hat!(res).peer_subs.contains(&peer) { + if !res_hat!(res).linkstatepeer_subs.contains(&peer) { // Register peer subscription { - res_hat_mut!(res).peer_subs.insert(peer); - hat_mut!(tables).peer_subs.insert(res.clone()); + res_hat_mut!(res).linkstatepeer_subs.insert(peer); + hat_mut!(tables).linkstatepeer_subs.insert(res.clone()); } // Propagate subscription to peers @@ -253,7 +253,7 @@ fn register_peer_subscription( propagate_simple_subscription(tables, res, sub_info, face, send_declare); } -fn declare_peer_subscription( +fn declare_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -261,10 +261,10 @@ fn declare_peer_subscription( peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - register_peer_subscription(tables, face, res, sub_info, peer, send_declare); + register_linkstatepeer_subscription(tables, face, res, sub_info, peer, send_declare); } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc, id: SubscriberId, @@ -292,7 +292,7 @@ fn register_client_subscription( face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc, 
id: SubscriberId, @@ -300,22 +300,22 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_simple_subscription(tables, face, id, res, sub_info); let zid = tables.zid; - register_peer_subscription(tables, face, res, sub_info, zid, send_declare); + register_linkstatepeer_subscription(tables, face, res, sub_info, zid, send_declare); } #[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_subs + .linkstatepeer_subs .iter() .any(|peer| peer != &tables.zid) } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -329,7 +329,7 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn remote_client_subs(res: &Arc, face: &Arc) -> bool { +fn remote_simple_subs(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) @@ -410,7 +410,7 @@ fn propagate_forget_simple_subscription( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_subs(&m, &face) || remote_peer_subs(tables, &m)) + && (remote_simple_subs(&m, &face) || remote_linkstatepeer_subs(tables, &m)) }) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { @@ -442,7 +442,7 @@ fn propagate_forget_sourced_subscription( src_face: Option<&Arc>, source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -477,41 +477,43 @@ fn unregister_peer_subscription( peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); + res_hat_mut!(res) + 
.linkstatepeer_subs + .retain(|sub| sub != peer); - if res_hat!(res).peer_subs.is_empty() { + if res_hat!(res).linkstatepeer_subs.is_empty() { hat_mut!(tables) - .peer_subs + .linkstatepeer_subs .retain(|sub| !Arc::ptr_eq(sub, res)); propagate_forget_simple_subscription(tables, res, send_declare); } } -fn undeclare_peer_subscription( +fn undeclare_linkstatepeer_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - if res_hat!(res).peer_subs.contains(peer) { + if res_hat!(res).linkstatepeer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer, send_declare); propagate_forget_sourced_subscription(tables, res, face, peer); } } -fn forget_peer_subscription( +fn forget_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_subscription(tables, Some(face), res, peer, send_declare); + undeclare_linkstatepeer_subscription(tables, Some(face), res, peer, send_declare); } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -522,14 +524,20 @@ pub(super) fn undeclare_client_subscription( get_mut_unchecked(ctx).subs = None; } - let mut client_subs = client_subs(res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone(), send_declare); + let mut simple_subs = simple_subs(res); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, res); + if simple_subs.is_empty() { + undeclare_linkstatepeer_subscription( + tables, + None, + res, + &tables.zid.clone(), + send_declare, + ); } - if client_subs.len() == 1 && !peer_subs { - let mut face = &mut client_subs[0]; + if simple_subs.len() == 1 && !linkstatepeer_subs { + let mut face = &mut simple_subs[0]; if face.whatami != WhatAmI::Client { if let Some(id) = 
face_hat_mut!(face).local_subs.remove(res) { send_declare( @@ -558,7 +566,8 @@ pub(super) fn undeclare_client_subscription( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_subs(&m, face) || remote_peer_subs(tables, &m)) + && (remote_simple_subs(&m, face) + || remote_linkstatepeer_subs(tables, &m)) }) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { @@ -588,14 +597,14 @@ pub(super) fn undeclare_client_subscription( } } -fn forget_client_subscription( +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res, send_declare); + undeclare_simple_subscription(tables, face, &mut res, send_declare); Some(res) } else { None @@ -608,9 +617,9 @@ pub(super) fn pubsub_remove_node( send_declare: &mut SendDeclare, ) { for mut res in hat!(tables) - .peer_subs + .linkstatepeer_subs .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) + .filter(|res| res_hat!(res).linkstatepeer_subs.contains(node)) .cloned() .collect::>>() { @@ -622,7 +631,7 @@ pub(super) fn pubsub_remove_node( } pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec]) { - let net = match hat!(tables).peers_net.as_ref() { + let net = match hat!(tables).linkstatepeers_net.as_ref() { Some(net) => net, None => { tracing::error!("Error accessing peers_net in pubsub_tree_change!"); @@ -636,10 +645,10 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec Vec<(Arc, Sources)> { // Compute the list of known suscriptions (keys) hat!(tables) - .peer_subs + .linkstatepeer_subs .iter() .map(|s| { ( @@ -839,7 +848,7 @@ impl HatPubSubTrait for HatCode { // sources of those subscriptions Sources { routers: vec![], - peers: Vec::from_iter(res_hat!(s).peer_subs.iter().cloned()), + peers: 
Vec::from_iter(res_hat!(s).linkstatepeer_subs.iter().cloned()), clients: s .session_ctxs .values() @@ -926,7 +935,7 @@ impl HatPubSubTrait for HatCode { for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -937,7 +946,7 @@ impl HatPubSubTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_subs, + &res_hat!(mres).linkstatepeer_subs, ); for (sid, context) in &mres.session_ctxs { @@ -1017,13 +1026,13 @@ impl HatPubSubTrait for HatCode { for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); insert_faces_for_subs( &mut matching_subscriptions, tables, net, net.idx.index(), - &res_hat!(mres).peer_subs, + &res_hat!(mres).linkstatepeer_subs, ); for (sid, context) in &mres.session_ctxs { diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 883db69975..de70cddf9b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -83,7 +83,7 @@ fn local_qabl_info( ) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .iter() .fold(None, |accu, (zid, info)| { if *zid != tables.zid { @@ -220,7 +220,7 @@ fn propagate_sourced_queryable( src_face: Option<&mut Arc>, source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -250,7 +250,7 @@ fn propagate_sourced_queryable( } } -fn register_peer_queryable( +fn register_linkstatepeer_queryable( 
tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, @@ -258,12 +258,14 @@ fn register_peer_queryable( peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - let current_info = res_hat!(res).peer_qabls.get(&peer); + let current_info = res_hat!(res).linkstatepeer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); - hat_mut!(tables).peer_qabls.insert(res.clone()); + res_hat_mut!(res) + .linkstatepeer_qabls + .insert(peer, *qabl_info); + hat_mut!(tables).linkstatepeer_qabls.insert(res.clone()); } // Propagate queryable to peers @@ -274,7 +276,7 @@ fn register_peer_queryable( propagate_simple_queryable(tables, res, face, send_declare); } -fn declare_peer_queryable( +fn declare_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -283,10 +285,10 @@ fn declare_peer_queryable( send_declare: &mut SendDeclare, ) { let face = Some(face); - register_peer_queryable(tables, face, res, qabl_info, peer, send_declare); + register_linkstatepeer_queryable(tables, face, res, qabl_info, peer, send_declare); } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -306,7 +308,7 @@ fn register_client_queryable( face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -314,23 +316,23 @@ fn declare_client_queryable( qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; - register_peer_queryable(tables, Some(face), res, &local_details, zid, send_declare); + register_linkstatepeer_queryable(tables, Some(face), res, &local_details, zid, 
send_declare); } #[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .keys() .any(|peer| peer != &tables.zid) } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -344,7 +346,7 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { +fn remote_simple_qabls(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) @@ -425,7 +427,8 @@ fn propagate_forget_simple_queryable( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_qabls(&m, &face) || remote_peer_qabls(tables, &m)) + && (remote_simple_qabls(&m, &face) + || remote_linkstatepeer_qabls(tables, &m)) }) }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { @@ -457,7 +460,7 @@ fn propagate_forget_sourced_queryable( src_face: Option<&Arc>, source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -486,47 +489,47 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_peer_queryable( +fn unregister_linkstatepeer_queryable( tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - res_hat_mut!(res).peer_qabls.remove(peer); + res_hat_mut!(res).linkstatepeer_qabls.remove(peer); - if res_hat!(res).peer_qabls.is_empty() { + if res_hat!(res).linkstatepeer_qabls.is_empty() { hat_mut!(tables) - .peer_qabls + .linkstatepeer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); propagate_forget_simple_queryable(tables, res, send_declare); } } -fn undeclare_peer_queryable( +fn 
undeclare_linkstatepeer_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer, send_declare); + if res_hat!(res).linkstatepeer_qabls.contains_key(peer) { + unregister_linkstatepeer_queryable(tables, res, peer, send_declare); propagate_forget_sourced_queryable(tables, res, face, peer); } } -fn forget_peer_queryable( +fn forget_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_queryable(tables, Some(face), res, peer, send_declare); + undeclare_linkstatepeer_queryable(tables, Some(face), res, peer, send_declare); } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -541,18 +544,25 @@ pub(super) fn undeclare_client_queryable( get_mut_unchecked(ctx).qabl = None; } - let mut client_qabls = client_qabls(res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut simple_qabls = simple_qabls(res); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, res); - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone(), send_declare); + if simple_qabls.is_empty() { + undeclare_linkstatepeer_queryable(tables, None, res, &tables.zid.clone(), send_declare); } else { let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid, send_declare); + register_linkstatepeer_queryable( + tables, + None, + res, + &local_info, + tables.zid, + send_declare, + ); } - if client_qabls.len() == 1 && !peer_qabls { - let mut face = &mut client_qabls[0]; + if simple_qabls.len() == 1 && !linkstatepeer_qabls { + let mut face = &mut simple_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( 
&face.primitives, @@ -580,7 +590,8 @@ pub(super) fn undeclare_client_queryable( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_qabls(&m, face) || remote_peer_qabls(tables, &m)) + && (remote_simple_qabls(&m, face) + || remote_linkstatepeer_qabls(tables, &m)) }) }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { @@ -607,14 +618,14 @@ pub(super) fn undeclare_client_queryable( } } -fn forget_client_queryable( +fn forget_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res, send_declare); + undeclare_simple_queryable(tables, face, &mut res, send_declare); Some(res) } else { None @@ -627,15 +638,15 @@ pub(super) fn queries_remove_node( send_declare: &mut SendDeclare, ) { let mut qabls = vec![]; - for res in hat!(tables).peer_qabls.iter() { - for qabl in res_hat!(res).peer_qabls.keys() { + for res in hat!(tables).linkstatepeer_qabls.iter() { + for qabl in res_hat!(res).linkstatepeer_qabls.keys() { if qabl == node { qabls.push(res.clone()); } } } for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node, send_declare); + unregister_linkstatepeer_queryable(tables, &mut res, node, send_declare); update_matches_query_routes(tables, &res); Resource::clean(&mut res) @@ -643,7 +654,7 @@ pub(super) fn queries_remove_node( } pub(super) fn queries_tree_change(tables: &mut Tables, new_children: &[Vec]) { - let net = match hat!(tables).peers_net.as_ref() { + let net = match hat!(tables).linkstatepeers_net.as_ref() { Some(net) => net, None => { tracing::error!("Error accessing peers_net in queries_tree_change!"); @@ -657,10 +668,10 @@ pub(super) fn queries_tree_change(tables: &mut Tables, new_children: &[Vec Vec<(Arc, Sources)> { // Compute the list of known queryables (keys) hat!(tables) - 
.peer_qabls + .linkstatepeer_qabls .iter() .map(|s| { ( @@ -907,7 +920,7 @@ impl HatQueriesTrait for HatCode { // sources of those queryables Sources { routers: vec![], - peers: Vec::from_iter(res_hat!(s).peer_qabls.keys().cloned()), + peers: Vec::from_iter(res_hat!(s).linkstatepeer_qabls.keys().cloned()), clients: s .session_ctxs .values() @@ -958,7 +971,7 @@ impl HatQueriesTrait for HatCode { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -969,7 +982,7 @@ impl HatQueriesTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_qabls, + &res_hat!(mres).linkstatepeer_qabls, complete, ); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs b/zenoh/src/net/routing/hat/linkstate_peer/token.rs index adb55b7bbb..0fa65481cc 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/token.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -170,7 +170,7 @@ fn propagate_sourced_token( src_face: Option<&Arc>, source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -199,18 +199,18 @@ fn propagate_sourced_token( } } -fn register_peer_token( +fn register_linkstatepeer_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - if !res_hat!(res).peer_tokens.contains(&peer) { + if !res_hat!(res).linkstatepeer_tokens.contains(&peer) { // Register peer liveliness { - res_hat_mut!(res).peer_tokens.insert(peer); - hat_mut!(tables).peer_tokens.insert(res.clone()); + res_hat_mut!(res).linkstatepeer_tokens.insert(peer); + 
hat_mut!(tables).linkstatepeer_tokens.insert(res.clone()); } // Propagate liveliness to peers @@ -221,17 +221,17 @@ fn register_peer_token( propagate_simple_token(tables, res, face, send_declare); } -fn declare_peer_token( +fn declare_linkstatepeer_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - register_peer_token(tables, face, res, peer, send_declare); + register_linkstatepeer_token(tables, face, res, peer, send_declare); } -fn register_client_token( +fn register_simple_token( _tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -258,29 +258,29 @@ fn register_client_token( face_hat_mut!(face).remote_tokens.insert(id, res.clone()); } -fn declare_client_token( +fn declare_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, res: &mut Arc, send_declare: &mut SendDeclare, ) { - register_client_token(tables, face, id, res); + register_simple_token(tables, face, id, res); let zid = tables.zid; - register_peer_token(tables, face, res, zid, send_declare); + register_linkstatepeer_token(tables, face, res, zid, send_declare); } #[inline] -fn remote_peer_tokens(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_tokens(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_tokens + .linkstatepeer_tokens .iter() .any(|peer| peer != &tables.zid) } #[inline] -fn client_tokens(res: &Arc) -> Vec> { +fn simple_tokens(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -294,7 +294,7 @@ fn client_tokens(res: &Arc) -> Vec> { } #[inline] -fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { +fn remote_simple_tokens(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.token) @@ -375,7 +375,8 @@ fn propagate_forget_simple_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_tokens(&m, &face) || remote_peer_tokens(tables, &m)) + && 
(remote_simple_tokens(&m, &face) + || remote_linkstatepeer_tokens(tables, &m)) }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { @@ -407,7 +408,7 @@ fn propagate_forget_sourced_token( src_face: Option<&Arc>, source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -436,47 +437,49 @@ fn propagate_forget_sourced_token( } } -fn unregister_peer_token( +fn unregister_linkstatepeer_token( tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - res_hat_mut!(res).peer_tokens.retain(|token| token != peer); + res_hat_mut!(res) + .linkstatepeer_tokens + .retain(|token| token != peer); - if res_hat!(res).peer_tokens.is_empty() { + if res_hat!(res).linkstatepeer_tokens.is_empty() { hat_mut!(tables) - .peer_tokens + .linkstatepeer_tokens .retain(|token| !Arc::ptr_eq(token, res)); propagate_forget_simple_token(tables, res, send_declare); } } -fn undeclare_peer_token( +fn undeclare_linkstatepeer_token( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - if res_hat!(res).peer_tokens.contains(peer) { - unregister_peer_token(tables, res, peer, send_declare); + if res_hat!(res).linkstatepeer_tokens.contains(peer) { + unregister_linkstatepeer_token(tables, res, peer, send_declare); propagate_forget_sourced_token(tables, res, face, peer); } } -fn forget_peer_token( +fn forget_linkstatepeer_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_token(tables, Some(face), res, peer, send_declare); + undeclare_linkstatepeer_token(tables, Some(face), res, peer, send_declare); } -pub(super) fn undeclare_client_token( +pub(super) fn undeclare_simple_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ 
-491,14 +494,14 @@ pub(super) fn undeclare_client_token( get_mut_unchecked(ctx).token = false; } - let mut client_tokens = client_tokens(res); - let peer_tokens = remote_peer_tokens(tables, res); - if client_tokens.is_empty() { - undeclare_peer_token(tables, None, res, &tables.zid.clone(), send_declare); + let mut simple_tokens = simple_tokens(res); + let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, res); + if simple_tokens.is_empty() { + undeclare_linkstatepeer_token(tables, None, res, &tables.zid.clone(), send_declare); } - if client_tokens.len() == 1 && !peer_tokens { - let mut face = &mut client_tokens[0]; + if simple_tokens.len() == 1 && !linkstatepeer_tokens { + let mut face = &mut simple_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { send_declare( @@ -527,8 +530,8 @@ pub(super) fn undeclare_client_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_tokens(&m, face) - || remote_peer_tokens(tables, &m)) + && (remote_simple_tokens(&m, face) + || remote_linkstatepeer_tokens(tables, &m)) }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { @@ -556,14 +559,14 @@ pub(super) fn undeclare_client_token( } } -fn forget_client_token( +fn forget_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res, send_declare); + undeclare_simple_token(tables, face, &mut res, send_declare); Some(res) } else { None @@ -576,19 +579,19 @@ pub(super) fn token_remove_node( send_declare: &mut SendDeclare, ) { for mut res in hat!(tables) - .peer_tokens + .linkstatepeer_tokens .iter() - .filter(|res| res_hat!(res).peer_tokens.contains(node)) + .filter(|res| res_hat!(res).linkstatepeer_tokens.contains(node)) .cloned() .collect::>>() { - 
unregister_peer_token(tables, &mut res, node, send_declare); + unregister_linkstatepeer_token(tables, &mut res, node, send_declare); Resource::clean(&mut res) } } pub(super) fn token_tree_change(tables: &mut Tables, new_clildren: &[Vec]) { - let net = match hat!(tables).peers_net.as_ref() { + let net = match hat!(tables).linkstatepeers_net.as_ref() { Some(net) => net, None => { tracing::error!("Error accessing peers_net in token_tree_change!"); @@ -602,10 +605,10 @@ pub(super) fn token_tree_change(tables: &mut Tables, new_clildren: &[Vec, id: SubscriberId, @@ -173,7 +173,7 @@ fn register_client_subscription( face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, @@ -181,7 +181,7 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_simple_subscription(tables, face, id, res, sub_info); propagate_simple_subscription(tables, res, sub_info, face, send_declare); // This introduced a buffer overflow on windows @@ -285,7 +285,7 @@ fn propagate_forget_simple_subscription( } } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -355,14 +355,14 @@ pub(super) fn undeclare_client_subscription( } } -fn forget_client_subscription( +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res, send_declare); + undeclare_simple_subscription(tables, face, &mut res, send_declare); Some(res) } else { None @@ -545,7 +545,7 @@ impl HatPubSubTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, id, 
res, sub_info, send_declare); + declare_simple_subscription(tables, face, id, res, sub_info, send_declare); } fn undeclare_subscription( @@ -557,7 +557,7 @@ impl HatPubSubTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) -> Option> { - forget_client_subscription(tables, face, id, send_declare) + forget_simple_subscription(tables, face, id, send_declare) } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 9b0e0e490f..166f63b301 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -149,7 +149,7 @@ fn propagate_simple_queryable( } } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -169,7 +169,7 @@ fn register_client_queryable( face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -177,12 +177,12 @@ fn declare_client_queryable( qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -196,7 +196,7 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { +fn remote_simple_qabls(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) @@ -234,7 +234,7 @@ fn propagate_forget_simple_queryable( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_client_qabls(&m, face)) + 
.is_some_and(|m| m.context.is_some() && remote_simple_qabls(&m, face)) }) { if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(&res) { send_declare( @@ -259,7 +259,7 @@ fn propagate_forget_simple_queryable( } } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -274,14 +274,14 @@ pub(super) fn undeclare_client_queryable( get_mut_unchecked(ctx).qabl = None; } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { + let mut simple_qabls = simple_qabls(res); + if simple_qabls.is_empty() { propagate_forget_simple_queryable(tables, res, send_declare); } else { propagate_simple_queryable(tables, res, None, send_declare); } - if client_qabls.len() == 1 { - let mut face = &mut client_qabls[0]; + if simple_qabls.len() == 1 { + let mut face = &mut simple_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( &face.primitives, @@ -308,7 +308,7 @@ pub(super) fn undeclare_client_queryable( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && (remote_client_qabls(&m, face))) + .is_some_and(|m| m.context.is_some() && (remote_simple_qabls(&m, face))) }) { if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { send_declare( @@ -334,14 +334,14 @@ pub(super) fn undeclare_client_queryable( } } -fn forget_client_queryable( +fn forget_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res, send_declare); + undeclare_simple_queryable(tables, face, &mut res, send_declare); Some(res) } else { None @@ -530,7 +530,7 @@ impl HatQueriesTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, id, res, qabl_info, send_declare); + 
declare_simple_queryable(tables, face, id, res, qabl_info, send_declare); } fn undeclare_queryable( @@ -542,7 +542,7 @@ impl HatQueriesTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) -> Option> { - forget_client_queryable(tables, face, id, send_declare) + forget_simple_queryable(tables, face, id, send_declare) } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs index be36a4eb15..539599d2a2 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/token.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -119,7 +119,7 @@ fn propagate_simple_token( } } -fn register_client_token( +fn register_simple_token( _tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -146,20 +146,20 @@ fn register_client_token( face_hat_mut!(face).remote_tokens.insert(id, res.clone()); } -fn declare_client_token( +fn declare_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, res: &mut Arc, send_declare: &mut SendDeclare, ) { - register_client_token(tables, face, id, res); + register_simple_token(tables, face, id, res); propagate_simple_token(tables, res, face, send_declare); } #[inline] -fn client_tokens(res: &Arc) -> Vec> { +fn simple_tokens(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -173,7 +173,7 @@ fn client_tokens(res: &Arc) -> Vec> { } #[inline] -fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { +fn remote_simple_tokens(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.token) @@ -234,7 +234,7 @@ fn propagate_forget_simple_token( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_client_tokens(&m, &face)) + .is_some_and(|m| m.context.is_some() && remote_simple_tokens(&m, &face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { send_declare( @@ -284,7 +284,7 @@ fn 
propagate_forget_simple_token( } } -pub(super) fn undeclare_client_token( +pub(super) fn undeclare_simple_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -299,13 +299,13 @@ pub(super) fn undeclare_client_token( get_mut_unchecked(ctx).token = false; } - let mut client_tokens = client_tokens(res); - if client_tokens.is_empty() { + let mut simple_tokens = simple_tokens(res); + if simple_tokens.is_empty() { propagate_forget_simple_token(tables, res, send_declare); } - if client_tokens.len() == 1 { - let mut face = &mut client_tokens[0]; + if simple_tokens.len() == 1 { + let mut face = &mut simple_tokens[0]; if face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { send_declare( @@ -333,7 +333,7 @@ pub(super) fn undeclare_client_token( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_client_tokens(&m, face)) + .is_some_and(|m| m.context.is_some() && remote_simple_tokens(&m, face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { send_declare( @@ -360,7 +360,7 @@ pub(super) fn undeclare_client_token( } } -fn forget_client_token( +fn forget_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -368,10 +368,10 @@ fn forget_client_token( send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res, send_declare); + undeclare_simple_token(tables, face, &mut res, send_declare); Some(res) } else if let Some(mut res) = res { - undeclare_client_token(tables, face, &mut res, send_declare); + undeclare_simple_token(tables, face, &mut res, send_declare); Some(res) } else { None @@ -521,7 +521,7 @@ impl HatTokenTrait for HatCode { _interest_id: Option, send_declare: &mut SendDeclare, ) { - declare_client_token(tables, face, id, res, send_declare) + declare_simple_token(tables, face, id, res, send_declare) } fn undeclare_token( 
@@ -533,6 +533,6 @@ impl HatTokenTrait for HatCode { _node_id: NodeId, send_declare: &mut SendDeclare, ) -> Option> { - forget_client_token(tables, face, id, res, send_declare) + forget_simple_token(tables, face, id, res, send_declare) } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index a48e06987a..4f3a6ab62b 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -25,7 +25,7 @@ use std::{ time::Duration, }; -use token::{token_linkstate_change, token_remove_node, undeclare_client_token}; +use token::{token_linkstate_change, token_remove_node, undeclare_simple_token}; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, @@ -44,8 +44,8 @@ use zenoh_transport::unicast::TransportUnicast; use self::{ network::{shared_nodes, Network}, - pubsub::{pubsub_linkstate_change, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_linkstate_change, queries_remove_node, undeclare_client_queryable}, + pubsub::{pubsub_linkstate_change, pubsub_remove_node, undeclare_simple_subscription}, + queries::{queries_linkstate_change, queries_remove_node, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -119,23 +119,23 @@ use face_hat_mut; struct HatTables { router_subs: HashSet>, - peer_subs: HashSet>, + linkstatepeer_subs: HashSet>, router_tokens: HashSet>, - peer_tokens: HashSet>, + linkstatepeer_tokens: HashSet>, router_qabls: HashSet>, - peer_qabls: HashSet>, + linkstatepeer_qabls: HashSet>, routers_net: Option, - peers_net: Option, + linkstatepeers_net: Option, shared_nodes: Vec, routers_trees_task: Option, - peers_trees_task: Option, + linkstatepeers_trees_task: Option, router_peers_failover_brokering: bool, } impl Drop for HatTables { fn drop(&mut self) { - if self.peers_trees_task.is_some() { - let task = self.peers_trees_task.take().unwrap(); + if 
self.linkstatepeers_trees_task.is_some() { + let task = self.linkstatepeers_trees_task.take().unwrap(); task.terminate(Duration::from_secs(10)); } if self.routers_trees_task.is_some() { @@ -149,16 +149,16 @@ impl HatTables { fn new(router_peers_failover_brokering: bool) -> Self { Self { router_subs: HashSet::new(), - peer_subs: HashSet::new(), + linkstatepeer_subs: HashSet::new(), router_qabls: HashSet::new(), - peer_qabls: HashSet::new(), + linkstatepeer_qabls: HashSet::new(), router_tokens: HashSet::new(), - peer_tokens: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), routers_net: None, - peers_net: None, + linkstatepeers_net: None, shared_nodes: vec![], routers_trees_task: None, - peers_trees_task: None, + linkstatepeers_trees_task: None, router_peers_failover_brokering, } } @@ -167,7 +167,7 @@ impl HatTables { fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { match net_type { WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), + WhatAmI::Peer => self.linkstatepeers_net.as_ref(), _ => None, } } @@ -181,7 +181,7 @@ impl HatTables { .map(|net| net.full_linkstate) .unwrap_or(false), WhatAmI::Peer => self - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.full_linkstate) .unwrap_or(false), @@ -191,7 +191,7 @@ impl HatTables { #[inline] fn get_router_links(&self, peer: ZenohIdProto) -> impl Iterator + '_ { - self.peers_net + self.linkstatepeers_net .as_ref() .unwrap() .get_links(peer) @@ -249,7 +249,7 @@ impl HatTables { fn failover_brokering(&self, peer1: ZenohIdProto, peer2: ZenohIdProto) -> bool { self.router_peers_failover_brokering && self - .peers_net + .linkstatepeers_net .as_ref() .map(|net| { let links = net.get_links(peer1); @@ -260,7 +260,7 @@ impl HatTables { fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) + || (net_type == 
WhatAmI::Peer && self.linkstatepeers_trees_task.is_none()) { let task = TerminatableTask::spawn( zenoh_runtime::ZRuntime::Net, @@ -278,7 +278,11 @@ impl HatTables { .as_mut() .unwrap() .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + _ => hat_mut!(tables) + .linkstatepeers_net + .as_mut() + .unwrap() + .compute_trees(), }; tracing::trace!("Compute routes"); @@ -289,14 +293,14 @@ impl HatTables { tracing::trace!("Computations completed"); match net_type { WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, + _ => hat_mut!(tables).linkstatepeers_trees_task = None, }; }, TerminatableTask::create_cancellation_token(), ); match net_type { WhatAmI::Router => self.routers_trees_task = Some(task), - _ => self.peers_trees_task = Some(task), + _ => self.linkstatepeers_trees_task = Some(task), }; } } @@ -336,7 +340,7 @@ impl HatBaseTrait for HatCode { )); } if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( + hat_mut!(tables).linkstatepeers_net = Some(Network::new( "[Peers network]".to_string(), tables.zid, runtime, @@ -350,7 +354,7 @@ impl HatBaseTrait for HatCode { if router_full_linkstate && peer_full_linkstate { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } } @@ -393,7 +397,7 @@ impl HatBaseTrait for HatCode { .unwrap() .add_link(transport.clone()), WhatAmI::Peer => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { net.add_link(transport.clone()) } else { 0 @@ -405,7 +409,7 @@ impl HatBaseTrait for HatCode { if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - 
hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } @@ -461,7 +465,7 @@ impl HatBaseTrait for HatCode { let mut subs_matches = vec![]; for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -483,7 +487,7 @@ impl HatBaseTrait for HatCode { let mut qabls_matches = vec![]; for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -504,7 +508,7 @@ impl HatBaseTrait for HatCode { for (_id, mut res) in hat_face.remote_tokens.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_token(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); } drop(wtables); @@ -587,7 +591,7 @@ impl HatBaseTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } @@ -595,7 +599,7 @@ impl HatBaseTrait for HatCode { .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } WhatAmI::Peer => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { let changes = net.link_states(list.link_states, zid); if hat!(tables).full_net(WhatAmI::Peer) { for (_, removed_node) in 
changes.removed_nodes { @@ -621,7 +625,7 @@ impl HatBaseTrait for HatCode { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); hat_mut!(tables) @@ -675,7 +679,7 @@ impl HatBaseTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_local_context(routing_context, face_hat!(face).link_id) @@ -727,7 +731,7 @@ impl HatBaseTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } @@ -737,7 +741,7 @@ impl HatBaseTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { for (_, removed_node) in hat_mut!(tables) - .peers_net + .linkstatepeers_net .as_mut() .unwrap() .remove_link(&zid) @@ -764,12 +768,12 @@ impl HatBaseTrait for HatCode { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); hat_mut!(tables) .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + } else if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { net.remove_link(&zid); } } @@ -784,7 +788,7 @@ impl HatBaseTrait for HatCode { #[inline] fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() + || hat!(tables).linkstatepeers_net.is_none() || tables.zid == *hat!(tables).elect_router( &tables.zid, @@ -808,7 +812,7 @@ impl HatBaseTrait for HatCode { } { let dst_master = out_face.whatami != WhatAmI::Peer - || 
hat!(tables).peers_net.is_none() + || hat!(tables).linkstatepeers_net.is_none() || tables.zid == *hat!(tables).elect_router( &tables.zid, @@ -833,7 +837,7 @@ impl HatBaseTrait for HatCode { .map(|net| net.dot()) .unwrap_or_else(|| "graph {}".to_string()), WhatAmI::Peer => hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.dot()) .unwrap_or_else(|| "graph {}".to_string()), @@ -844,22 +848,22 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, - peer_subs: HashSet, + linkstatepeer_subs: HashSet, router_qabls: HashMap, - peer_qabls: HashMap, + linkstatepeer_qabls: HashMap, router_tokens: HashSet, - peer_tokens: HashSet, + linkstatepeer_tokens: HashSet, } impl HatContext { fn new() -> Self { Self { router_subs: HashSet::new(), - peer_subs: HashSet::new(), + linkstatepeer_subs: HashSet::new(), router_qabls: HashMap::new(), - peer_qabls: HashMap::new(), + linkstatepeer_qabls: HashMap::new(), router_tokens: HashSet::new(), - peer_tokens: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), } } } @@ -921,7 +925,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option< fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_link(face_hat!(face).link_id) @@ -960,7 +964,7 @@ fn get_routes_entries(tables: &Tables) -> RoutesIndexes { .collect::>(); let peers_indexes = if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .graph diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 40d1836e58..eaaf4ff921 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -236,7 +236,7 @@ fn register_router_subscription( } // Propagate subscription to peers if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { - register_peer_subscription(tables, face, res, 
sub_info, tables.zid) + register_linkstatepeer_subscription(tables, face, res, sub_info, tables.zid) } // Propagate subscription to clients @@ -254,18 +254,18 @@ fn declare_router_subscription( register_router_subscription(tables, face, res, sub_info, router, send_declare); } -fn register_peer_subscription( +fn register_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohIdProto, ) { - if !res_hat!(res).peer_subs.contains(&peer) { + if !res_hat!(res).linkstatepeer_subs.contains(&peer) { // Register peer subscription { - res_hat_mut!(res).peer_subs.insert(peer); - hat_mut!(tables).peer_subs.insert(res.clone()); + res_hat_mut!(res).linkstatepeer_subs.insert(peer); + hat_mut!(tables).linkstatepeer_subs.insert(res.clone()); } // Propagate subscription to peers @@ -273,7 +273,7 @@ fn register_peer_subscription( } } -fn declare_peer_subscription( +fn declare_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -281,13 +281,13 @@ fn declare_peer_subscription( peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - register_peer_subscription(tables, face, res, sub_info, peer); + register_linkstatepeer_subscription(tables, face, res, sub_info, peer); let propa_sub_info = *sub_info; let zid = tables.zid; register_router_subscription(tables, face, res, &propa_sub_info, zid, send_declare); } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc, id: SubscriberId, @@ -315,7 +315,7 @@ fn register_client_subscription( face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, @@ -323,7 +323,7 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_simple_subscription(tables, face, id, res, 
sub_info); let zid = tables.zid; register_router_subscription(tables, face, res, sub_info, zid, send_declare); } @@ -338,16 +338,16 @@ fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { } #[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_subs + .linkstatepeer_subs .iter() .any(|peer| peer != &tables.zid) } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -361,7 +361,7 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn remote_client_subs(res: &Arc, face: &Arc) -> bool { +fn remote_simple_subs(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) @@ -442,8 +442,8 @@ fn propagate_forget_simple_subscription( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_subs(&m, &face) - || remote_peer_subs(tables, &m) + && (remote_simple_subs(&m, &face) + || remote_linkstatepeer_subs(tables, &m) || remote_router_subs(tables, &m)) }) }) { @@ -568,7 +568,7 @@ fn unregister_router_subscription( .retain(|sub| !Arc::ptr_eq(sub, res)); if hat_mut!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + undeclare_linkstatepeer_subscription(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_subscription(tables, res, send_declare); } @@ -600,44 +600,46 @@ fn forget_router_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { - res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); + res_hat_mut!(res) + .linkstatepeer_subs + .retain(|sub| sub != peer); - if res_hat!(res).peer_subs.is_empty() { + if res_hat!(res).linkstatepeer_subs.is_empty() { hat_mut!(tables) - .peer_subs + .linkstatepeer_subs .retain(|sub| 
!Arc::ptr_eq(sub, res)); } } -fn undeclare_peer_subscription( +fn undeclare_linkstatepeer_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, ) { - if res_hat!(res).peer_subs.contains(peer) { + if res_hat!(res).linkstatepeer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); } } -fn forget_peer_subscription( +fn forget_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_subscription(tables, Some(face), res, peer); - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, res); + undeclare_linkstatepeer_subscription(tables, Some(face), res, peer); + let simple_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, res); let zid = tables.zid; - if !client_subs && !peer_subs { + if !simple_subs && !linkstatepeer_subs { undeclare_router_subscription(tables, None, res, &zid, send_declare); } } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -648,17 +650,17 @@ pub(super) fn undeclare_client_subscription( get_mut_unchecked(ctx).subs = None; } - let mut client_subs = client_subs(res); + let mut simple_subs = simple_subs(res); let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() && !peer_subs { + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, res); + if simple_subs.is_empty() && !linkstatepeer_subs { undeclare_router_subscription(tables, None, res, &tables.zid.clone(), send_declare); } else { propagate_forget_simple_subscription_to_peers(tables, res, send_declare); } - if client_subs.len() == 1 && !router_subs && !peer_subs { 
- let mut face = &mut client_subs[0]; + if simple_subs.len() == 1 && !router_subs && !linkstatepeer_subs { + let mut face = &mut simple_subs[0]; if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { send_declare( &face.primitives, @@ -686,8 +688,8 @@ pub(super) fn undeclare_client_subscription( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_subs(&m, face) - || remote_peer_subs(tables, &m) + && (remote_simple_subs(&m, face) + || remote_linkstatepeer_subs(tables, &m) || remote_router_subs(tables, &m)) }) }) { @@ -715,14 +717,14 @@ pub(super) fn undeclare_client_subscription( } } -fn forget_client_subscription( +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc, id: SubscriberId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res, send_declare); + undeclare_simple_subscription(tables, face, &mut res, send_declare); Some(res) } else { None @@ -752,16 +754,16 @@ pub(super) fn pubsub_remove_node( } WhatAmI::Peer => { for mut res in hat!(tables) - .peer_subs + .linkstatepeer_subs .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) + .filter(|res| res_hat!(res).linkstatepeer_subs.contains(node)) .cloned() .collect::>>() { unregister_peer_subscription(tables, &mut res, node); - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { + let simple_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, &res); + if !simple_subs && !linkstatepeer_subs { undeclare_router_subscription( tables, None, @@ -800,13 +802,13 @@ pub(super) fn pubsub_tree_change( let subs_res = match net_type { WhatAmI::Router => &hat!(tables).router_subs, - _ => &hat!(tables).peer_subs, + _ => 
&hat!(tables).linkstatepeer_subs, }; for res in subs_res { let subs = match net_type { WhatAmI::Router => &res_hat!(res).router_subs, - _ => &res_hat!(res).peer_subs, + _ => &res_hat!(res).linkstatepeer_subs, }; for sub in subs { if *sub == tree_id { @@ -935,8 +937,8 @@ pub(crate) fn declare_sub_interest( if hat!(tables).router_subs.iter().any(|sub| { sub.context.is_some() && sub.matches(res) - && (remote_client_subs(sub, face) - || remote_peer_subs(tables, sub) + && (remote_simple_subs(sub, face) + || remote_linkstatepeer_subs(tables, sub) || remote_router_subs(tables, sub)) }) { let id = if mode.future() { @@ -970,7 +972,10 @@ pub(crate) fn declare_sub_interest( if sub.context.is_some() && sub.matches(res) && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub) + .linkstatepeer_subs + .iter() + .any(|r| *r != tables.zid) || sub.session_ctxs.values().any(|s| { s.face.id != face.id && s.subs.is_some() @@ -1013,7 +1018,10 @@ pub(crate) fn declare_sub_interest( for sub in &hat!(tables).router_subs { if sub.context.is_some() && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || res_hat!(sub).peer_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub) + .linkstatepeer_subs + .iter() + .any(|r| *r != tables.zid) || sub.session_ctxs.values().any(|s| { s.subs.is_some() && (s.face.whatami != WhatAmI::Peer @@ -1072,13 +1080,20 @@ impl HatPubSubTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) + declare_linkstatepeer_subscription( + tables, + face, + res, + sub_info, + peer, + send_declare, + ) } } else { - declare_client_subscription(tables, face, id, res, sub_info, send_declare) + declare_simple_subscription(tables, face, id, res, sub_info, send_declare) } } - _ => declare_client_subscription(tables, face, id, 
res, sub_info, send_declare), + _ => declare_simple_subscription(tables, face, id, res, sub_info, send_declare), } } @@ -1108,7 +1123,13 @@ impl HatPubSubTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, &mut res, &peer, send_declare); + forget_linkstatepeer_subscription( + tables, + face, + &mut res, + &peer, + send_declare, + ); Some(res) } else { None @@ -1117,10 +1138,10 @@ impl HatPubSubTrait for HatCode { None } } else { - forget_client_subscription(tables, face, id, send_declare) + forget_simple_subscription(tables, face, id, send_declare) } } - _ => forget_client_subscription(tables, face, id, send_declare), + _ => forget_simple_subscription(tables, face, id, send_declare), } } @@ -1137,7 +1158,7 @@ impl HatPubSubTrait for HatCode { Sources { routers: Vec::from_iter(res_hat!(s).router_subs.iter().cloned()), peers: if hat!(tables).full_net(WhatAmI::Peer) { - Vec::from_iter(res_hat!(s).peer_subs.iter().cloned()) + Vec::from_iter(res_hat!(s).linkstatepeer_subs.iter().cloned()) } else { s.session_ctxs .values() @@ -1254,7 +1275,7 @@ impl HatPubSubTrait for HatCode { } if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -1265,7 +1286,7 @@ impl HatPubSubTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_subs, + &res_hat!(mres).linkstatepeer_subs, ); } @@ -1362,13 +1383,13 @@ impl HatPubSubTrait for HatCode { } if hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); insert_faces_for_subs( &mut matching_subscriptions, tables, net, net.idx.index(), - 
&res_hat!(mres).peer_subs, + &res_hat!(mres).linkstatepeer_subs, ); } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 93eceaa8f3..4703625fff 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -64,7 +64,7 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo let info = if hat!(tables).full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|_| { res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .iter() .fold(None, |accu, (zid, info)| { if *zid != tables.zid { @@ -152,7 +152,7 @@ fn local_qabl_info( }; if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { info = res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .iter() .fold(info, |accu, (zid, info)| { if *zid != tables.zid { @@ -363,7 +363,13 @@ fn register_router_queryable( // Propagate queryable to peers if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) + register_linkstatepeer_queryable( + tables, + face.as_deref_mut(), + res, + &local_info, + tables.zid, + ) } } @@ -382,19 +388,21 @@ fn declare_router_queryable( register_router_queryable(tables, Some(face), res, qabl_info, router, send_declare); } -fn register_peer_queryable( +fn register_linkstatepeer_queryable( tables: &mut Tables, face: Option<&mut Arc>, res: &mut Arc, qabl_info: &QueryableInfoType, peer: ZenohIdProto, ) { - let current_info = res_hat!(res).peer_qabls.get(&peer); + let current_info = res_hat!(res).linkstatepeer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); - hat_mut!(tables).peer_qabls.insert(res.clone()); + res_hat_mut!(res) + .linkstatepeer_qabls + .insert(peer, *qabl_info); + 
hat_mut!(tables).linkstatepeer_qabls.insert(res.clone()); } // Propagate queryable to peers @@ -402,7 +410,7 @@ fn register_peer_queryable( } } -fn declare_peer_queryable( +fn declare_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -411,13 +419,13 @@ fn declare_peer_queryable( send_declare: &mut SendDeclare, ) { let mut face = Some(face); - register_peer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); + register_linkstatepeer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); let local_info = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, face, res, &local_info, zid, send_declare); } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -437,7 +445,7 @@ fn register_client_queryable( face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, @@ -445,7 +453,7 @@ fn declare_client_queryable( qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, Some(face), res, &local_details, zid, send_declare); @@ -461,16 +469,16 @@ fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { } #[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .keys() .any(|peer| peer != &tables.zid) } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -484,7 +492,7 @@ fn client_qabls(res: &Arc) -> Vec> { } #[inline] -fn 
remote_client_qabls(res: &Arc, face: &Arc) -> bool { +fn remote_simple_qabls(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) @@ -565,8 +573,8 @@ fn propagate_forget_simple_queryable( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_qabls(&m, &face) - || remote_peer_qabls(tables, &m) + && (remote_simple_qabls(&m, &face) + || remote_linkstatepeer_qabls(tables, &m) || remote_router_qabls(tables, &m)) }) }) { @@ -691,7 +699,7 @@ fn unregister_router_queryable( .retain(|qabl| !Arc::ptr_eq(qabl, res)); if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + undeclare_linkstatepeer_queryable(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_queryable(tables, res, send_declare); } @@ -722,41 +730,45 @@ fn forget_router_queryable( undeclare_router_queryable(tables, Some(face), res, router, send_declare); } -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { - res_hat_mut!(res).peer_qabls.remove(peer); +fn unregister_linkstatepeer_queryable( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, +) { + res_hat_mut!(res).linkstatepeer_qabls.remove(peer); - if res_hat!(res).peer_qabls.is_empty() { + if res_hat!(res).linkstatepeer_qabls.is_empty() { hat_mut!(tables) - .peer_qabls + .linkstatepeer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); } } -fn undeclare_peer_queryable( +fn undeclare_linkstatepeer_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, ) { - if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer); + if res_hat!(res).linkstatepeer_qabls.contains_key(peer) { + unregister_linkstatepeer_queryable(tables, res, peer); propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); } } -fn forget_peer_queryable( +fn 
forget_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_queryable(tables, Some(face), res, peer); + undeclare_linkstatepeer_queryable(tables, Some(face), res, peer); - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, res); + let simple_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, res); let zid = tables.zid; - if !client_qabls && !peer_qabls { + if !simple_qabls && !linkstatepeer_qabls { undeclare_router_queryable(tables, None, res, &zid, send_declare); } else { let local_info = local_router_qabl_info(tables, res); @@ -764,7 +776,7 @@ fn forget_peer_queryable( } } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -779,11 +791,11 @@ pub(super) fn undeclare_client_queryable( get_mut_unchecked(ctx).qabl = None; } - let mut client_qabls = client_qabls(res); + let mut simple_qabls = simple_qabls(res); let router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, res); - if client_qabls.is_empty() && !peer_qabls { + if simple_qabls.is_empty() && !linkstatepeer_qabls { undeclare_router_queryable(tables, None, res, &tables.zid.clone(), send_declare); } else { let local_info = local_router_qabl_info(tables, res); @@ -791,8 +803,8 @@ pub(super) fn undeclare_client_queryable( propagate_forget_simple_queryable_to_peers(tables, res, send_declare); } - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let mut face = &mut client_qabls[0]; + if simple_qabls.len() == 1 && !router_qabls && !linkstatepeer_qabls { + let mut face = &mut simple_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { 
send_declare( &face.primitives, @@ -820,8 +832,8 @@ pub(super) fn undeclare_client_queryable( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_qabls(&m, face) - || remote_peer_qabls(tables, &m) + && (remote_simple_qabls(&m, face) + || remote_linkstatepeer_qabls(tables, &m) || remote_router_qabls(tables, &m)) }) }) { @@ -849,14 +861,14 @@ pub(super) fn undeclare_client_queryable( } } -fn forget_client_queryable( +fn forget_simple_queryable( tables: &mut Tables, face: &mut Arc, id: QueryableId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res, send_declare); + undeclare_simple_queryable(tables, face, &mut res, send_declare); Some(res) } else { None @@ -896,11 +908,11 @@ pub(super) fn queries_remove_node( } } for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); + unregister_linkstatepeer_queryable(tables, &mut res, node); - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { + let simple_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, &res); + if !simple_qabls && !linkstatepeer_qabls { undeclare_router_queryable( tables, None, @@ -953,7 +965,7 @@ pub(super) fn queries_linkstate_change( let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.get_links(dst_face.zid)) .unwrap_or_else(|| &[]); @@ -1043,13 +1055,13 @@ pub(super) fn queries_tree_change( let qabls_res = match net_type { WhatAmI::Router => &hat!(tables).router_qabls, - _ => &hat!(tables).peer_qabls, + _ => &hat!(tables).linkstatepeer_qabls, }; for res in qabls_res { let qabls = match net_type { WhatAmI::Router => 
&res_hat!(res).router_qabls, - _ => &res_hat!(res).peer_qabls, + _ => &res_hat!(res).linkstatepeer_qabls, }; if let Some(qabl_info) = qabls.get(&tree_id) { send_sourced_queryable_to_net_children( @@ -1134,7 +1146,10 @@ pub(crate) fn declare_qabl_interest( qabl.context.is_some() && qabl.matches(res) && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl) + .linkstatepeer_qabls + .keys() + .any(|r| *r != tables.zid) || qabl.session_ctxs.values().any(|s| { s.face.id != face.id && s.qabl.is_some() @@ -1179,7 +1194,10 @@ pub(crate) fn declare_qabl_interest( if qabl.context.is_some() && qabl.matches(res) && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || res_hat!(qabl).peer_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl) + .linkstatepeer_qabls + .keys() + .any(|r| *r != tables.zid) || qabl.session_ctxs.values().any(|s| { s.qabl.is_some() && (s.face.whatami != WhatAmI::Peer @@ -1221,8 +1239,8 @@ pub(crate) fn declare_qabl_interest( } else { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() - && (remote_client_qabls(qabl, face) - || remote_peer_qabls(tables, qabl) + && (remote_simple_qabls(qabl, face) + || remote_linkstatepeer_qabls(tables, qabl) || remote_router_qabls(tables, qabl)) { let info = local_qabl_info(tables, qabl, face); @@ -1279,13 +1297,20 @@ impl HatQueriesTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare) + declare_linkstatepeer_queryable( + tables, + face, + res, + qabl_info, + peer, + send_declare, + ) } } else { - declare_client_queryable(tables, face, id, res, qabl_info, send_declare) + declare_simple_queryable(tables, face, id, res, qabl_info, send_declare) } } - _ => declare_client_queryable(tables, face, id, res, qabl_info, send_declare), + _ => 
declare_simple_queryable(tables, face, id, res, qabl_info, send_declare), } } @@ -1315,7 +1340,13 @@ impl HatQueriesTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, &mut res, &peer, send_declare); + forget_linkstatepeer_queryable( + tables, + face, + &mut res, + &peer, + send_declare, + ); Some(res) } else { None @@ -1324,10 +1355,10 @@ impl HatQueriesTrait for HatCode { None } } else { - forget_client_queryable(tables, face, id, send_declare) + forget_simple_queryable(tables, face, id, send_declare) } } - _ => forget_client_queryable(tables, face, id, send_declare), + _ => forget_simple_queryable(tables, face, id, send_declare), } } @@ -1344,7 +1375,7 @@ impl HatQueriesTrait for HatCode { Sources { routers: Vec::from_iter(res_hat!(s).router_qabls.keys().cloned()), peers: if hat!(tables).full_net(WhatAmI::Peer) { - Vec::from_iter(res_hat!(s).peer_qabls.keys().cloned()) + Vec::from_iter(res_hat!(s).linkstatepeer_qabls.keys().cloned()) } else { s.session_ctxs .values() @@ -1425,7 +1456,7 @@ impl HatQueriesTrait for HatCode { } if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -1436,7 +1467,7 @@ impl HatQueriesTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_qabls, + &res_hat!(mres).linkstatepeer_qabls, complete, ); } diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index 644932446c..e6f18a5ea2 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -216,7 +216,7 @@ fn register_router_token( } // Propagate liveliness to peers if hat!(tables).full_net(WhatAmI::Peer) && 
face.whatami != WhatAmI::Peer { - register_peer_token(tables, face, res, tables.zid) + register_linkstatepeer_token(tables, face, res, tables.zid) } // Propagate liveliness to clients @@ -233,17 +233,17 @@ fn declare_router_token( register_router_token(tables, face, res, router, send_declare); } -fn register_peer_token( +fn register_linkstatepeer_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, ) { - if !res_hat!(res).peer_tokens.contains(&peer) { + if !res_hat!(res).linkstatepeer_tokens.contains(&peer) { // Register peer liveliness { - res_hat_mut!(res).peer_tokens.insert(peer); - hat_mut!(tables).peer_tokens.insert(res.clone()); + res_hat_mut!(res).linkstatepeer_tokens.insert(peer); + hat_mut!(tables).linkstatepeer_tokens.insert(res.clone()); } // Propagate liveliness to peers @@ -251,19 +251,19 @@ fn register_peer_token( } } -fn declare_peer_token( +fn declare_linkstatepeer_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - register_peer_token(tables, face, res, peer); + register_linkstatepeer_token(tables, face, res, peer); let zid = tables.zid; register_router_token(tables, face, res, zid, send_declare); } -fn register_client_token( +fn register_simple_token( _tables: &mut Tables, face: &mut Arc, id: TokenId, @@ -290,14 +290,14 @@ fn register_client_token( face_hat_mut!(face).remote_tokens.insert(id, res.clone()); } -fn declare_client_token( +fn declare_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, res: &mut Arc, send_declare: &mut SendDeclare, ) { - register_client_token(tables, face, id, res); + register_simple_token(tables, face, id, res); let zid = tables.zid; register_router_token(tables, face, res, zid, send_declare); } @@ -312,16 +312,16 @@ fn remote_router_tokens(tables: &Tables, res: &Arc) -> bool { } #[inline] -fn remote_peer_tokens(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_tokens(tables: &Tables, res: &Arc) -> bool { 
res.context.is_some() && res_hat!(res) - .peer_tokens + .linkstatepeer_tokens .iter() .any(|peer| peer != &tables.zid) } #[inline] -fn client_tokens(res: &Arc) -> Vec> { +fn simple_tokens(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -335,7 +335,7 @@ fn client_tokens(res: &Arc) -> Vec> { } #[inline] -fn remote_client_tokens(res: &Arc, face: &Arc) -> bool { +fn remote_simple_tokens(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.token) @@ -446,8 +446,8 @@ fn propagate_forget_simple_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_tokens(&m, &face) - || remote_peer_tokens(tables, &m) + && (remote_simple_tokens(&m, &face) + || remote_linkstatepeer_tokens(tables, &m) || remote_router_tokens(tables, &m)) }) }) { @@ -600,7 +600,7 @@ fn unregister_router_token( .retain(|token| !Arc::ptr_eq(token, res)); if hat_mut!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_token(tables, None, res, &tables.zid.clone()); + undeclare_linkstatepeer_token(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_token(tables, res, face, send_declare); } @@ -631,45 +631,51 @@ fn forget_router_token( undeclare_router_token(tables, Some(face), res, router, send_declare); } -fn unregister_peer_token(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { - res_hat_mut!(res).peer_tokens.retain(|token| token != peer); +fn unregister_linkstatepeer_token( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, +) { + res_hat_mut!(res) + .linkstatepeer_tokens + .retain(|token| token != peer); - if res_hat!(res).peer_tokens.is_empty() { + if res_hat!(res).linkstatepeer_tokens.is_empty() { hat_mut!(tables) - .peer_tokens + .linkstatepeer_tokens .retain(|token| !Arc::ptr_eq(token, res)); } } -fn undeclare_peer_token( +fn undeclare_linkstatepeer_token( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, peer: &ZenohIdProto, ) { 
- if res_hat!(res).peer_tokens.contains(peer) { - unregister_peer_token(tables, res, peer); + if res_hat!(res).linkstatepeer_tokens.contains(peer) { + unregister_linkstatepeer_token(tables, res, peer); propagate_forget_sourced_token(tables, res, face, peer, WhatAmI::Peer); } } -fn forget_peer_token( +fn forget_linkstatepeer_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_token(tables, Some(face), res, peer); - let client_tokens = res.session_ctxs.values().any(|ctx| ctx.token); - let peer_tokens = remote_peer_tokens(tables, res); + undeclare_linkstatepeer_token(tables, Some(face), res, peer); + let simple_tokens = res.session_ctxs.values().any(|ctx| ctx.token); + let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, res); let zid = tables.zid; - if !client_tokens && !peer_tokens { + if !simple_tokens && !linkstatepeer_tokens { undeclare_router_token(tables, None, res, &zid, send_declare); } } -pub(super) fn undeclare_client_token( +pub(super) fn undeclare_simple_token( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -684,17 +690,17 @@ pub(super) fn undeclare_client_token( get_mut_unchecked(ctx).token = false; } - let mut client_tokens = client_tokens(res); + let mut simple_tokens = simple_tokens(res); let router_tokens = remote_router_tokens(tables, res); - let peer_tokens = remote_peer_tokens(tables, res); - if client_tokens.is_empty() && !peer_tokens { + let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, res); + if simple_tokens.is_empty() && !linkstatepeer_tokens { undeclare_router_token(tables, Some(face), res, &tables.zid.clone(), send_declare); } else { propagate_forget_simple_token_to_peers(tables, res, send_declare); } - if client_tokens.len() == 1 && !router_tokens && !peer_tokens { - let mut face = &mut client_tokens[0]; + if simple_tokens.len() == 1 && !router_tokens && !linkstatepeer_tokens { + let mut face = &mut simple_tokens[0]; if 
face.whatami != WhatAmI::Client { if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { send_declare( @@ -723,8 +729,8 @@ pub(super) fn undeclare_client_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_client_tokens(&m, face) - || remote_peer_tokens(tables, &m) + && (remote_simple_tokens(&m, face) + || remote_linkstatepeer_tokens(tables, &m) || remote_router_tokens(tables, &m)) }) }) { @@ -753,14 +759,14 @@ pub(super) fn undeclare_client_token( } } -fn forget_client_token( +fn forget_simple_token( tables: &mut Tables, face: &mut Arc, id: TokenId, send_declare: &mut SendDeclare, ) -> Option> { if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { - undeclare_client_token(tables, face, &mut res, send_declare); + undeclare_simple_token(tables, face, &mut res, send_declare); Some(res) } else { None @@ -788,16 +794,16 @@ pub(super) fn token_remove_node( } WhatAmI::Peer => { for mut res in hat!(tables) - .peer_tokens + .linkstatepeer_tokens .iter() - .filter(|res| res_hat!(res).peer_tokens.contains(node)) + .filter(|res| res_hat!(res).linkstatepeer_tokens.contains(node)) .cloned() .collect::>>() { - unregister_peer_token(tables, &mut res, node); - let client_tokens = res.session_ctxs.values().any(|ctx| ctx.token); - let peer_tokens = remote_peer_tokens(tables, &res); - if !client_tokens && !peer_tokens { + unregister_linkstatepeer_token(tables, &mut res, node); + let simple_tokens = res.session_ctxs.values().any(|ctx| ctx.token); + let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, &res); + if !simple_tokens && !linkstatepeer_tokens { undeclare_router_token( tables, None, @@ -834,13 +840,13 @@ pub(super) fn token_tree_change( let tokens_res = match net_type { WhatAmI::Router => &hat!(tables).router_tokens, - _ => &hat!(tables).peer_tokens, + _ => &hat!(tables).linkstatepeer_tokens, }; for res in tokens_res { let tokens = match net_type { WhatAmI::Router => 
&res_hat!(res).router_tokens, - _ => &res_hat!(res).peer_tokens, + _ => &res_hat!(res).linkstatepeer_tokens, }; for token in tokens { if *token == tree_id { @@ -884,7 +890,7 @@ pub(super) fn token_linkstate_change( let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.get_links(dst_face.zid)) .unwrap_or_else(|| &[]); @@ -966,8 +972,8 @@ pub(crate) fn declare_token_interest( if hat!(tables).router_tokens.iter().any(|token| { token.context.is_some() && token.matches(res) - && (remote_client_tokens(token, face) - || remote_peer_tokens(tables, token) + && (remote_simple_tokens(token, face) + || remote_linkstatepeer_tokens(tables, token) || remote_router_tokens(tables, token)) }) { let id = if mode.future() { @@ -1000,7 +1006,10 @@ pub(crate) fn declare_token_interest( .router_tokens .iter() .any(|r| *r != tables.zid) - || res_hat!(token).peer_tokens.iter().any(|r| *r != tables.zid) + || res_hat!(token) + .linkstatepeer_tokens + .iter() + .any(|r| *r != tables.zid) || token.session_ctxs.values().any(|s| { s.face.id != face.id && s.token @@ -1042,7 +1051,10 @@ pub(crate) fn declare_token_interest( .router_tokens .iter() .any(|r| *r != tables.zid) - || res_hat!(token).peer_tokens.iter().any(|r| *r != tables.zid) + || res_hat!(token) + .linkstatepeer_tokens + .iter() + .any(|r| *r != tables.zid) || token.session_ctxs.values().any(|s| { s.token && (s.face.whatami != WhatAmI::Peer @@ -1097,13 +1109,13 @@ impl HatTokenTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_token(tables, face, res, peer, send_declare) + declare_linkstatepeer_token(tables, face, res, peer, send_declare) } } else { - declare_client_token(tables, face, id, res, send_declare) + declare_simple_token(tables, face, id, res, send_declare) } } - _ => declare_client_token(tables, face, id, res, 
send_declare), + _ => declare_simple_token(tables, face, id, res, send_declare), } } @@ -1133,7 +1145,7 @@ impl HatTokenTrait for HatCode { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(mut res) = res { if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_token(tables, face, &mut res, &peer, send_declare); + forget_linkstatepeer_token(tables, face, &mut res, &peer, send_declare); Some(res) } else { None @@ -1142,10 +1154,10 @@ impl HatTokenTrait for HatCode { None } } else { - forget_client_token(tables, face, id, send_declare) + forget_simple_token(tables, face, id, send_declare) } } - _ => forget_client_token(tables, face, id, send_declare), + _ => forget_simple_token(tables, face, id, send_declare), } } } From 502d3be20b78f2873ec069cbcd5fefd308ecf5c7 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 30 Jul 2024 15:26:32 +0200 Subject: [PATCH 560/598] Merge/main into dev/1.0.0 (#1279) * Add NOTE for LowLatency transport. (#1088) Signed-off-by: ChenYing Kuo * fix: Improve debug messages in `zenoh-transport` (#1090) * fix: Improve debug messages for failing RX/TX tasks * fix: Improve debug message for `accept_link` timeout * chore: Fix `clippy::redundant_pattern_matching` error * Improve pipeline backoff (#1097) * Yield task for backoff * Improve comments and error handling in backoff * Simplify pipeline pull * Consider backoff configuration * Add typos check to CI (#1065) * Fix typos * Add typos check to CI * Start link tx_task before notifying router (#1098) * Fix typos (#1110) * bump quinn & rustls (#1086) * bump quinn & rustls * fix ci windows check * add comments * Fix interface name scanning when listening on IP unspecified for TCP/TLS/QUIC/WS (#1123) Co-authored-by: Julien Enoch * Enable releasing from any branch (#1136) * Fix cargo clippy (#1145) * Release tables locks before propagating subscribers and queryables declarations to void dead locks (#1150) * Send simple sub and qabl declarations using a given function * Send 
simple sub and qabl declarations after releasing tables lock * Send simple sub and qabl declarations after releasing tables lock (missing places) * feat: make `TerminatableTask` terminate itself when dropped (#1151) * Fix bug in keyexpr::includes leading to call get_unchecked on empty array UB (#1208) * REST plugin uses unbounded flume channels for queries (#1213) * fix: typo in selector.rs (#1228) * fix: zenohd --cfg (#1263) * fix: zenohd --cfg * ci: trigger * Update zenohd/src/main.rs --------- Co-authored-by: Luca Cominardi * Fix failover brokering bug reacting to linkstate changes (#1272) * Change missleading log * Fix failover brokering bug reacting to linkstate changes * Retrigger CI --------- Co-authored-by: Luca Cominardi * Code format * Fix clippy warnings * Code format * Fix Clippy errors from Rust 1.80 (#1273) * Allow unexpected `doc_auto_cfg` flag * Keep never-constructed logger interceptor * Ignore interior mutability of `Resource` * Fix typo * Resolve `clippy::doc-lazy-continuation` errors * Upgrade `time@0.3.28` to `time@0.3.36` See https://github.com/time-rs/time/issues/693 * Update Cargo.toml (#1277) Updated description to be aligned with what we use everywhere else * Merge ci.yaml --------- Signed-off-by: ChenYing Kuo Co-authored-by: ChenYing Kuo (CY) Co-authored-by: Mahmoud Mazouz Co-authored-by: Tavo Annus Co-authored-by: JLer Co-authored-by: Julien Enoch Co-authored-by: OlivierHecart Co-authored-by: Yuyuan Yuan Co-authored-by: Diogo Matsubara Co-authored-by: OlivierHecart Co-authored-by: kydos --- Cargo.toml | 2 +- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 1 + commons/zenoh-task/src/lib.rs | 26 ++++++++++++------- plugins/zenoh-plugin-rest/src/lib.rs | 3 ++- zenoh-ext/src/group.rs | 1 - zenoh-ext/src/publication_cache.rs | 2 +- zenoh/src/api/key_expr.rs | 1 - zenoh/src/net/routing/hat/client/queries.rs | 1 - .../src/net/routing/hat/linkstate_peer/mod.rs | 3 +-- zenoh/src/net/routing/hat/router/mod.rs | 10 +++---- zenohd/src/main.rs | 5 ++++ 
11 files changed, 33 insertions(+), 22 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 254cdc19b9..b686656f77 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ authors = [ edition = "2021" license = "EPL-2.0 OR Apache-2.0" categories = ["network-programming"] -description = "Zenoh: Zero Overhead Pub/sub, Store/Query and Compute." +description = "Zenoh: The Zero Overhead Pub/Sub/Query Protocol." # DEFAULT-FEATURES NOTE: Be careful with default-features and additivity! # (https://github.com/rust-lang/cargo/issues/11329) diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index a98337b987..e2afa9712f 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -187,6 +187,7 @@ impl keyexpr { /// For instance, if `self` is `"a/**/c/*" and `prefix` is `a/b/c` then: /// - the `prefix` matches `"a/**/c"` leading to a result of `"*"` when stripped from `self` /// - the `prefix` matches `"a/**"` leading to a result of `"**/c/*"` when stripped from `self` + /// /// So the result is `["*", "**/c/*"]`. /// If `prefix` cannot match the beginning of `self`, an empty list is reuturned. 
/// diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index 7eab9d316f..2a06b56b5c 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -129,10 +129,16 @@ impl TaskController { } pub struct TerminatableTask { - handle: JoinHandle<()>, + handle: Option>, token: CancellationToken, } +impl Drop for TerminatableTask { + fn drop(&mut self) { + self.terminate(std::time::Duration::from_secs(10)); + } +} + impl TerminatableTask { pub fn create_cancellation_token() -> CancellationToken { CancellationToken::new() @@ -146,7 +152,7 @@ impl TerminatableTask { T: Send + 'static, { TerminatableTask { - handle: rt.spawn(future.map(|_f| ())), + handle: Some(rt.spawn(future.map(|_f| ()))), token, } } @@ -167,24 +173,26 @@ impl TerminatableTask { }; TerminatableTask { - handle: rt.spawn(task), + handle: Some(rt.spawn(task)), token, } } /// Attempts to terminate the task. /// Returns true if task completed / aborted within timeout duration, false otherwise. - pub fn terminate(self, timeout: Duration) -> bool { + pub fn terminate(&mut self, timeout: Duration) -> bool { ResolveFuture::new(async move { self.terminate_async(timeout).await }).wait() } /// Async version of [`TerminatableTask::terminate()`]. 
- pub async fn terminate_async(self, timeout: Duration) -> bool { + pub async fn terminate_async(&mut self, timeout: Duration) -> bool { self.token.cancel(); - if tokio::time::timeout(timeout, self.handle).await.is_err() { - tracing::error!("Failed to terminate the task"); - return false; - }; + if let Some(handle) = self.handle.take() { + if tokio::time::timeout(timeout, handle).await.is_err() { + tracing::error!("Failed to terminate the task"); + return false; + }; + } true } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 7ef21ace7c..898e9ae2df 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -402,7 +402,8 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result PublicationCache<'a> { let PublicationCache { _queryable, local_sub, - task, + mut task, } = self; _queryable.undeclare().await?; local_sub.undeclare().await?; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 50ce79180b..81d9aecc20 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // - use std::{ convert::{TryFrom, TryInto}, future::{IntoFuture, Ready}, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 0c394da851..8ef3ec1fb7 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -105,7 +105,6 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); - println!("Decled key = {key_expr:?}"); send_declare( &dst_face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index ded87f18ee..f861e1bed3 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ 
-126,8 +126,7 @@ struct HatTables { impl Drop for HatTables { fn drop(&mut self) { - if self.linkstatepeers_trees_task.is_some() { - let task = self.linkstatepeers_trees_task.take().unwrap(); + if let Some(mut task) = self.linkstatepeers_trees_task.take() { task.terminate(Duration::from_secs(10)); } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 4f3a6ab62b..cf7d1d14b6 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -134,12 +134,10 @@ struct HatTables { impl Drop for HatTables { fn drop(&mut self) { - if self.linkstatepeers_trees_task.is_some() { - let task = self.linkstatepeers_trees_task.take().unwrap(); + if let Some(mut task) = self.linkstatepeers_trees_task.take() { task.terminate(Duration::from_secs(10)); } - if self.routers_trees_task.is_some() { - let task = self.routers_trees_task.take().unwrap(); + if let Some(mut task) = self.routers_trees_task.take() { task.terminate(Duration::from_secs(10)); } } @@ -253,7 +251,9 @@ impl HatTables { .as_ref() .map(|net| { let links = net.get_links(peer1); - HatTables::failover_brokering_to(links, peer2) + let res = HatTables::failover_brokering_to(links, peer2); + tracing::trace!("failover_brokering {} {} : {}", peer1, peer2, res); + res }) .unwrap_or(false) } diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 60d898d84f..d25260d606 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -264,6 +264,11 @@ fn config_from_args(args: &Args) -> Config { } Err(e) => tracing::warn!("Couldn't perform configuration {}: {}", json, e), } + } else { + panic!( + "--cfg accepts KEY:VALUE pairs. 
{} is not a valid KEY:VALUE pair.", + json + ) } } tracing::debug!("Config: {:?}", &config); From 24d504b8e6d2a925240d2ff319c0e06d73f265d9 Mon Sep 17 00:00:00 2001 From: Diogo Matsubara Date: Tue, 30 Jul 2024 18:20:29 +0200 Subject: [PATCH 561/598] chore: fix documentation in examples (#1280) --- examples/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/src/lib.rs b/examples/src/lib.rs index e863e1457c..6562942361 100644 --- a/examples/src/lib.rs +++ b/examples/src/lib.rs @@ -33,7 +33,7 @@ pub struct CommonArgs { /// Disable the multicast-based scouting mechanism. no_multicast_scouting: bool, #[arg(long)] - /// Disable the multicast-based scouting mechanism. + /// Enable shared-memory feature. enable_shm: bool, } From b31cc1aef67f8f75264e0e90561b19ce95f2cec1 Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Wed, 31 Jul 2024 12:56:54 +0200 Subject: [PATCH 562/598] fix: invalid mime crash on REST plugin (#1287) Signed-off-by: Gabriele Baldoni --- plugins/zenoh-plugin-rest/src/lib.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 898e9ae2df..8affec9067 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -127,11 +127,7 @@ async fn to_json(results: flume::Receiver) -> String { } async fn to_json_response(results: flume::Receiver) -> Response { - response( - StatusCode::Ok, - Mime::from_str("application/json").unwrap(), - &to_json(results).await, - ) + response(StatusCode::Ok, "application/json", &to_json(results).await) } fn sample_to_html(sample: &Sample) -> String { @@ -203,12 +199,17 @@ fn method_to_kind(method: Method) -> SampleKind { } } -fn response(status: StatusCode, content_type: impl TryInto, body: &str) -> Response { +fn response<'a, S: Into<&'a str> + std::fmt::Debug>( + status: StatusCode, + content_type: S, + body: &str, +) -> Response { + 
tracing::trace!("Outgoing Response: {status} - {content_type:?} - body: {body}"); let mut builder = Response::builder(status) .header("content-length", body.len().to_string()) .header("Access-Control-Allow-Origin", "*") .body(body); - if let Ok(mime) = content_type.try_into() { + if let Ok(mime) = Mime::from_str(content_type.into()) { builder = builder.content_type(mime); } builder.build() From 5cfcccc69ce7df3030af6b818356fa1c69b24763 Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Wed, 31 Jul 2024 16:46:17 +0300 Subject: [PATCH 563/598] Shm segment cleanup (#1281) * Fix SHM cleanup at exit * fix tests * change cleanup to be more robust * can't use this because of typos checks that fire on external crate imports * fix docs * remove unsafe --- Cargo.lock | 65 ++++++++++++++++++- Cargo.toml | 1 + commons/zenoh-shm/Cargo.toml | 2 +- .../zenoh-shm/src/api/client_storage/mod.rs | 22 +++---- .../src/api/provider/shm_provider.rs | 10 +-- commons/zenoh-shm/src/cleanup.rs | 47 ++++++++++++++ .../src/header/allocated_descriptor.rs | 4 +- commons/zenoh-shm/src/header/storage.rs | 7 +- commons/zenoh-shm/src/header/subscription.rs | 8 +-- commons/zenoh-shm/src/lib.rs | 1 + commons/zenoh-shm/src/posix_shm/segment.rs | 18 +++-- commons/zenoh-shm/src/reader.rs | 6 +- .../src/watchdog/allocated_watchdog.rs | 4 +- commons/zenoh-shm/src/watchdog/confirmator.rs | 8 +-- .../zenoh-shm/src/watchdog/periodic_task.rs | 2 +- commons/zenoh-shm/src/watchdog/storage.rs | 7 +- commons/zenoh-shm/src/watchdog/validator.rs | 9 ++- commons/zenoh-shm/tests/header.rs | 14 ++-- commons/zenoh-shm/tests/watchdog.rs | 25 ++++--- io/zenoh-transport/src/manager.rs | 2 +- 20 files changed, 193 insertions(+), 69 deletions(-) create mode 100644 commons/zenoh-shm/src/cleanup.rs diff --git a/Cargo.lock b/Cargo.lock index 24a39b3d63..365a07a00d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -691,6 +691,12 @@ version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.30" @@ -2091,7 +2097,7 @@ dependencies = [ "memchr", "num-cmp", "once_cell", - "parking_lot", + "parking_lot 0.12.1", "percent-encoding", "regex", "serde", @@ -2650,6 +2656,17 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + [[package]] name = "parking_lot" version = "0.12.1" @@ -2657,7 +2674,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.8", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] @@ -4056,6 +4087,34 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "static_init" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" +dependencies = [ + "bitflags 1.3.2", + "cfg_aliases", + "libc", + "parking_lot 0.11.2", + "parking_lot_core 0.8.6", + "static_init_macro", + "winapi", +] + +[[package]] +name = "static_init_macro" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" +dependencies = [ + "cfg_aliases", + "memchr", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "stdweb" version = "0.4.20" @@ -6002,7 +6061,6 @@ dependencies = [ "async-trait", "bincode", "crc", - "lazy_static", "libc", "lockfree", "num-traits", @@ -6011,6 +6069,7 @@ dependencies = [ "serde", "shared_memory", "stabby", + "static_init", "thread-priority", "tokio", "tracing", diff --git a/Cargo.toml b/Cargo.toml index b686656f77..37e6acd046 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -157,6 +157,7 @@ serde_cbor = "0.11.2" serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" +static_init = "1.0.3" stabby = "5.0.1" sha3 = "0.10.6" shared_memory = "0.12.4" diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index 5e3dec390e..e5eb204a23 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -44,7 +44,7 @@ zenoh-core = { workspace = true } zenoh-macros = { workspace = true } zenoh-buffers = { workspace = true } rand = { workspace = true } -lazy_static = { workspace = true } +static_init = { workspace = true } num-traits = { workspace = true } num_cpus = { workspace = true, optional = true } thread-priority = { workspace = true } diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs index 205bc3a9dc..07b4cd88bf 100644 --- a/commons/zenoh-shm/src/api/client_storage/mod.rs +++ b/commons/zenoh-shm/src/api/client_storage/mod.rs @@ -17,7 +17,7 @@ use std::{ sync::{Arc, RwLock}, }; -use lazy_static::lazy_static; +use 
static_init::dynamic; use zenoh_result::{bail, ZResult}; use crate::{ @@ -31,17 +31,15 @@ use crate::{ reader::{ClientStorage, GlobalDataSegmentID}, }; -lazy_static! { - /// A global lazily-initialized SHM client storage. - /// When initialized, contains default client set, - /// see ShmClientStorage::with_default_client_set - #[zenoh_macros::unstable_doc] - pub static ref GLOBAL_CLIENT_STORAGE: Arc = Arc::new( - ShmClientStorage::builder() - .with_default_client_set() - .build() - ); -} +#[dynamic(lazy, drop)] +/// A global lazily-initialized SHM client storage. When initialized, +/// contains default client set, see [with_default_client_set](ShmClientStorage::with_default_client_set) +#[zenoh_macros::unstable_doc] +pub static mut GLOBAL_CLIENT_STORAGE: Arc = Arc::new( + ShmClientStorage::builder() + .with_default_client_set() + .build(), +); /// Builder to create new client storages #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/api/provider/shm_provider.rs b/commons/zenoh-shm/src/api/provider/shm_provider.rs index 1487a1ee18..bab1588e0c 100644 --- a/commons/zenoh-shm/src/api/provider/shm_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shm_provider.rs @@ -904,13 +904,15 @@ where ConfirmedDescriptor, )> { // allocate shared header - let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; // allocate watchdog - let allocated_watchdog = GLOBAL_STORAGE.allocate_watchdog()?; + let allocated_watchdog = GLOBAL_STORAGE.read().allocate_watchdog()?; // add watchdog to confirmator - let confirmed_watchdog = GLOBAL_CONFIRMATOR.add_owned(&allocated_watchdog.descriptor)?; + let confirmed_watchdog = GLOBAL_CONFIRMATOR + .read() + .add_owned(&allocated_watchdog.descriptor)?; Ok((allocated_header, allocated_watchdog, confirmed_watchdog)) } @@ -928,7 +930,7 @@ where // add watchdog to validator let c_header = header.clone(); - GLOBAL_VALIDATOR.add( + 
GLOBAL_VALIDATOR.read().add( allocated_watchdog.descriptor.clone(), Box::new(move || { c_header diff --git a/commons/zenoh-shm/src/cleanup.rs b/commons/zenoh-shm/src/cleanup.rs new file mode 100644 index 0000000000..5649732bf6 --- /dev/null +++ b/commons/zenoh-shm/src/cleanup.rs @@ -0,0 +1,47 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use static_init::dynamic; + +/// A global cleanup, that is guaranteed to be dropped at normal program exit and that will +/// execute all registered cleanup routines at this moment +#[dynamic(lazy, drop)] +pub(crate) static mut CLEANUP: Cleanup = Cleanup::new(); + +/// An RAII object that calls all registered routines upon destruction +pub(crate) struct Cleanup { + cleanups: lockfree::queue::Queue>>, +} + +impl Cleanup { + fn new() -> Self { + Self { + cleanups: Default::default(), + } + } + + pub(crate) fn register_cleanup(&self, cleanup_fn: Box) { + self.cleanups.push(Some(cleanup_fn)); + } +} + +impl Drop for Cleanup { + fn drop(&mut self) { + while let Some(cleanup) = self.cleanups.pop() { + if let Some(f) = cleanup { + f(); + } + } + } +} diff --git a/commons/zenoh-shm/src/header/allocated_descriptor.rs b/commons/zenoh-shm/src/header/allocated_descriptor.rs index f800683595..6cf1d1d011 100644 --- a/commons/zenoh-shm/src/header/allocated_descriptor.rs +++ b/commons/zenoh-shm/src/header/allocated_descriptor.rs @@ -21,6 +21,8 @@ pub struct AllocatedHeaderDescriptor { impl Drop for AllocatedHeaderDescriptor { fn drop(&mut self) { - GLOBAL_HEADER_STORAGE.reclaim_header(self.descriptor.clone()); + GLOBAL_HEADER_STORAGE + .read() 
+ .reclaim_header(self.descriptor.clone()); } } diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs index 36e004511a..7d4c06cd2a 100644 --- a/commons/zenoh-shm/src/header/storage.rs +++ b/commons/zenoh-shm/src/header/storage.rs @@ -16,7 +16,7 @@ use std::{ sync::{Arc, Mutex}, }; -use lazy_static::lazy_static; +use static_init::dynamic; use zenoh_result::{zerror, ZResult}; use super::{ @@ -25,9 +25,8 @@ use super::{ segment::HeaderSegment, }; -lazy_static! { - pub static ref GLOBAL_HEADER_STORAGE: HeaderStorage = HeaderStorage::new(32768usize).unwrap(); -} +#[dynamic(lazy,drop)] +pub static mut GLOBAL_HEADER_STORAGE: HeaderStorage = HeaderStorage::new(32768usize).unwrap(); pub struct HeaderStorage { available: Arc>>, diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs index 5efe54a7f7..6259877302 100644 --- a/commons/zenoh-shm/src/header/subscription.rs +++ b/commons/zenoh-shm/src/header/subscription.rs @@ -16,7 +16,7 @@ use std::{ sync::{Arc, Mutex}, }; -use lazy_static::lazy_static; +use static_init::dynamic; use zenoh_result::{zerror, ZResult}; use super::{ @@ -24,9 +24,9 @@ use super::{ segment::HeaderSegment, }; -lazy_static! { - pub static ref GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); -} +#[dynamic(lazy,drop)] + pub static mut GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); + pub struct Subscription { linked_table: Mutex>>, diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 8ec2458931..2d8173c2f9 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -46,6 +46,7 @@ macro_rules! 
tested_crate_module { } pub mod api; +mod cleanup; pub mod header; pub mod posix_shm; pub mod reader; diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs index 657976ece1..6a34506029 100644 --- a/commons/zenoh-shm/src/posix_shm/segment.rs +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -18,6 +18,8 @@ use rand::Rng; use shared_memory::{Shmem, ShmemConf, ShmemError}; use zenoh_result::{bail, zerror, ZResult}; +use crate::cleanup::CLEANUP; + const SEGMENT_DEDICATE_TRIES: usize = 100; const ECMA: crc::Crc = crc::Crc::::new(&crc::CRC_64_ECMA_182); @@ -55,15 +57,21 @@ where for _ in 0..SEGMENT_DEDICATE_TRIES { // Generate random id let id: ID = rand::thread_rng().gen(); + let os_id = Self::os_id(id.clone(), id_prefix); + + // Register cleanup routine to make sure Segment will be unlinked on exit + let c_os_id = os_id.clone(); + CLEANUP.read().register_cleanup(Box::new(move || { + if let Ok(mut shmem) = ShmemConf::new().os_id(c_os_id).open() { + shmem.set_owner(true); + drop(shmem); + } + })); // Try to create a new segment identified by prefix and generated id. // If creation fails because segment already exists for this id, // the creation attempt will be repeated with another id - match ShmemConf::new() - .size(alloc_size) - .os_id(Self::os_id(id.clone(), id_prefix)) - .create() - { + match ShmemConf::new().size(alloc_size).os_id(os_id).create() { Ok(shmem) => { tracing::debug!( "Created SHM segment, size: {alloc_size}, prefix: {id_prefix}, id: {id}" diff --git a/commons/zenoh-shm/src/reader.rs b/commons/zenoh-shm/src/reader.rs index 1298c38aff..a62e8a147f 100644 --- a/commons/zenoh-shm/src/reader.rs +++ b/commons/zenoh-shm/src/reader.rs @@ -51,11 +51,13 @@ impl ShmReader { // that the sender of this buffer has incremented it for us. 
// attach to the watchdog before doing other things - let watchdog = Arc::new(GLOBAL_CONFIRMATOR.add(&info.watchdog_descriptor)?); + let watchdog = Arc::new(GLOBAL_CONFIRMATOR.read().add(&info.watchdog_descriptor)?); let segment = self.ensure_segment(info)?; let shmb = ShmBufInner { - header: GLOBAL_HEADER_SUBSCRIPTION.link(&info.header_descriptor)?, + header: GLOBAL_HEADER_SUBSCRIPTION + .read() + .link(&info.header_descriptor)?, buf: segment.map(info.data_descriptor.chunk)?, info: info.clone(), watchdog, diff --git a/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs index 45917d5bdc..6293b157d3 100644 --- a/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs +++ b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs @@ -29,7 +29,7 @@ impl AllocatedWatchdog { impl Drop for AllocatedWatchdog { fn drop(&mut self) { - GLOBAL_VALIDATOR.remove(self.descriptor.clone()); - GLOBAL_STORAGE.free_watchdog(self.descriptor.clone()); + GLOBAL_VALIDATOR.read().remove(self.descriptor.clone()); + GLOBAL_STORAGE.read().free_watchdog(self.descriptor.clone()); } } diff --git a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs index b84a76dc50..9d87adfb97 100644 --- a/commons/zenoh-shm/src/watchdog/confirmator.rs +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -18,7 +18,7 @@ use std::{ time::Duration, }; -use lazy_static::lazy_static; +use static_init::dynamic; use zenoh_result::{zerror, ZResult}; use super::{ @@ -27,10 +27,10 @@ use super::{ segment::Segment, }; -lazy_static! 
{ - pub static ref GLOBAL_CONFIRMATOR: WatchdogConfirmator = +#[dynamic(lazy,drop)] +pub static mut GLOBAL_CONFIRMATOR: WatchdogConfirmator = WatchdogConfirmator::new(Duration::from_millis(50)); -} + pub struct ConfirmedDescriptor { pub owned: OwnedDescriptor, diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs index a41f601cfe..f68203df54 100644 --- a/commons/zenoh-shm/src/watchdog/periodic_task.rs +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -33,7 +33,7 @@ pub struct PeriodicTask { impl Drop for PeriodicTask { fn drop(&mut self) { - self.running.store(false, Ordering::Relaxed) + self.running.store(false, Ordering::Relaxed); } } diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs index 1b04ad313c..48fa4cde40 100644 --- a/commons/zenoh-shm/src/watchdog/storage.rs +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -16,14 +16,13 @@ use std::{ sync::{Arc, Mutex}, }; -use lazy_static::lazy_static; +use static_init::dynamic; use zenoh_result::{zerror, ZResult}; use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; -lazy_static! { - pub static ref GLOBAL_STORAGE: WatchdogStorage = WatchdogStorage::new(32768usize).unwrap(); -} +#[dynamic(lazy,drop)] +pub static mut GLOBAL_STORAGE: WatchdogStorage = WatchdogStorage::new(32768usize).unwrap(); pub struct WatchdogStorage { available: Arc>>, diff --git a/commons/zenoh-shm/src/watchdog/validator.rs b/commons/zenoh-shm/src/watchdog/validator.rs index d28dfa8e3c..5becefb547 100644 --- a/commons/zenoh-shm/src/watchdog/validator.rs +++ b/commons/zenoh-shm/src/watchdog/validator.rs @@ -14,16 +14,15 @@ use std::{collections::BTreeMap, sync::Arc, time::Duration}; -use lazy_static::lazy_static; +use static_init::dynamic; use super::{descriptor::OwnedDescriptor, periodic_task::PeriodicTask}; pub(super) type InvalidateCallback = Box; -lazy_static! 
{ - pub static ref GLOBAL_VALIDATOR: WatchdogValidator = - WatchdogValidator::new(Duration::from_millis(100)); -} +#[dynamic(lazy, drop)] +pub static mut GLOBAL_VALIDATOR: WatchdogValidator = + WatchdogValidator::new(Duration::from_millis(100)); enum Transaction { Add(InvalidateCallback), diff --git a/commons/zenoh-shm/tests/header.rs b/commons/zenoh-shm/tests/header.rs index f417f0b86e..747757a3b2 100644 --- a/commons/zenoh-shm/tests/header.rs +++ b/commons/zenoh-shm/tests/header.rs @@ -26,7 +26,7 @@ use common::execute_concurrent; fn header_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { - let _allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let _allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; Ok(()) } } @@ -43,9 +43,9 @@ fn header_alloc_concurrent() { fn header_link_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| { - let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; let descr = HeaderDescriptor::from(&allocated_header.descriptor); - let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.read().link(&descr)?; Ok(()) } } @@ -63,7 +63,7 @@ fn header_link_concurrent() { fn header_link_failure_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| { - let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; let descr = HeaderDescriptor::from(&allocated_header.descriptor); drop(allocated_header); @@ -73,7 +73,7 @@ fn header_link_failure_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Se // functionality is implemented on higher level by means of generation mechanism and 
protects from both header // and watchdog link-to-deallocated issues. This generation mechanism depends on the behaviour below, so // everything is fair :) - let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.read().link(&descr)?; Ok(()) } } @@ -90,9 +90,9 @@ fn header_link_failure_concurrent() { fn header_check_memory_fn(parallel_tasks: usize, iterations: usize) { let task_fun = |_task_index: usize, _iteration: usize| -> ZResult<()> { - let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; let descr = HeaderDescriptor::from(&allocated_header.descriptor); - let linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + let linked_header = GLOBAL_HEADER_SUBSCRIPTION.read().link(&descr)?; let mut rng = rand::thread_rng(); let allocated = allocated_header.descriptor.header(); diff --git a/commons/zenoh-shm/tests/watchdog.rs b/commons/zenoh-shm/tests/watchdog.rs index fbdf672f87..bc4a75dfa9 100644 --- a/commons/zenoh-shm/tests/watchdog.rs +++ b/commons/zenoh-shm/tests/watchdog.rs @@ -30,7 +30,7 @@ const CONFIRMATION_PERIOD: Duration = Duration::from_millis(50); fn watchdog_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { - let _allocated = GLOBAL_STORAGE.allocate_watchdog()?; + let _allocated = GLOBAL_STORAGE.read().allocate_watchdog()?; Ok(()) } } @@ -47,8 +47,8 @@ fn watchdog_alloc_concurrent() { fn watchdog_confirmed_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { - let allocated = GLOBAL_STORAGE.allocate_watchdog()?; - let confirmed = GLOBAL_CONFIRMATOR.add_owned(&allocated.descriptor)?; + let allocated = GLOBAL_STORAGE.read().allocate_watchdog()?; + let confirmed = GLOBAL_CONFIRMATOR.read().add_owned(&allocated.descriptor)?; // check that 
the confirmed watchdog stays valid for i in 0..10 { @@ -81,9 +81,11 @@ fn watchdog_confirmed_concurrent() { #[ignore] fn watchdog_confirmed_dangling() { let allocated = GLOBAL_STORAGE + .read() .allocate_watchdog() .expect("error allocating watchdog!"); let confirmed = GLOBAL_CONFIRMATOR + .read() .add_owned(&allocated.descriptor) .expect("error adding watchdog to confirmator!"); drop(allocated); @@ -97,13 +99,13 @@ fn watchdog_confirmed_dangling() { fn watchdog_validated_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { - let allocated = GLOBAL_STORAGE.allocate_watchdog()?; - let confirmed = GLOBAL_CONFIRMATOR.add_owned(&allocated.descriptor)?; + let allocated = GLOBAL_STORAGE.read().allocate_watchdog()?; + let confirmed = GLOBAL_CONFIRMATOR.read().add_owned(&allocated.descriptor)?; let valid = Arc::new(AtomicBool::new(true)); { let c_valid = valid.clone(); - GLOBAL_VALIDATOR.add( + GLOBAL_VALIDATOR.read().add( allocated.descriptor.clone(), Box::new(move || { c_valid.store(false, std::sync::atomic::Ordering::SeqCst); @@ -150,13 +152,14 @@ fn watchdog_validated_invalid_without_confirmator_fn( ) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { let allocated = GLOBAL_STORAGE + .read() .allocate_watchdog() .expect("error allocating watchdog!"); let valid = Arc::new(AtomicBool::new(true)); { let c_valid = valid.clone(); - GLOBAL_VALIDATOR.add( + GLOBAL_VALIDATOR.read().add( allocated.descriptor.clone(), Box::new(move || { c_valid.store(false, std::sync::atomic::Ordering::SeqCst); @@ -193,16 +196,18 @@ fn watchdog_validated_additional_confirmation_fn( ) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { let allocated = GLOBAL_STORAGE + .read() .allocate_watchdog() .expect("error allocating watchdog!"); let confirmed = 
GLOBAL_CONFIRMATOR + .read() .add_owned(&allocated.descriptor) .expect("error adding watchdog to confirmator!"); let allow_invalid = Arc::new(AtomicBool::new(false)); { let c_allow_invalid = allow_invalid.clone(); - GLOBAL_VALIDATOR.add( + GLOBAL_VALIDATOR.read().add( allocated.descriptor.clone(), Box::new(move || { assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); @@ -252,16 +257,18 @@ fn watchdog_validated_overloaded_system_fn( ) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { |_task_index: usize, _iteration: usize| -> ZResult<()> { let allocated = GLOBAL_STORAGE + .read() .allocate_watchdog() .expect("error allocating watchdog!"); let confirmed = GLOBAL_CONFIRMATOR + .read() .add_owned(&allocated.descriptor) .expect("error adding watchdog to confirmator!"); let allow_invalid = Arc::new(AtomicBool::new(false)); { let c_allow_invalid = allow_invalid.clone(); - GLOBAL_VALIDATOR.add( + GLOBAL_VALIDATOR.read().add( allocated.descriptor.clone(), Box::new(move || { assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 9df02dfc67..669744838f 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -276,7 +276,7 @@ impl TransportManagerBuilder { #[cfg(feature = "shared-memory")] let shm_reader = self .shm_reader - .unwrap_or_else(|| ShmReader::new(GLOBAL_CLIENT_STORAGE.clone())); + .unwrap_or_else(|| ShmReader::new((*GLOBAL_CLIENT_STORAGE.read()).clone())); let unicast = self.unicast.build( &mut prng, From bcda8ecbf83b2a6445ae7a505d436b03a952b842 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Wed, 31 Jul 2024 15:46:35 +0200 Subject: [PATCH 564/598] Fix failing Liveliness Subscriber Undeclaration (#1283) * Undeclare subscribers at the end of liveliness tests * Use `kind` in `Seesion::undeclare_subscriber_inner` * Address review comments --- zenoh-ext/tests/liveliness.rs | 48 
+++++++++++++++++++++++++++-------- zenoh/src/api/session.rs | 5 +--- zenoh/tests/liveliness.rs | 44 ++++++++++++++++++++++++++------ 3 files changed, 75 insertions(+), 22 deletions(-) diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs index 97dc817394..68d4b1b798 100644 --- a/zenoh-ext/tests/liveliness.rs +++ b/zenoh-ext/tests/liveliness.rs @@ -70,7 +70,7 @@ async fn test_liveliness_querying_subscriber_clique() { .unwrap(); tokio::time::sleep(SLEEP).await; - let _token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + let token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); @@ -81,12 +81,18 @@ async fn test_liveliness_querying_subscriber_clique() { assert_eq!(sample.kind(), SampleKind::Put); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); - drop(token1); + token1.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); assert_eq!(sample.kind(), SampleKind::Delete); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.close().await.unwrap(); + + peer1.close().await.unwrap(); + peer2.close().await.unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -106,7 +112,7 @@ async fn test_liveliness_querying_subscriber_brokered() { zenoh_util::try_init_log_from_env(); - let _router = { + let router = { let mut c = config::default(); c.listen .endpoints @@ -168,7 +174,7 @@ async fn test_liveliness_querying_subscriber_brokered() { .unwrap(); tokio::time::sleep(SLEEP).await; - let _token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + let token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); @@ -179,12 +185,20 @@ async 
fn test_liveliness_querying_subscriber_brokered() { assert_eq!(sample.kind(), SampleKind::Put); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); - drop(token1); + token1.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); assert_eq!(sample.kind(), SampleKind::Delete); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.close().await.unwrap(); + + router.close().await.unwrap(); + client1.close().await.unwrap(); + client2.close().await.unwrap(); + client3.close().await.unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -244,7 +258,7 @@ async fn test_liveliness_fetching_subscriber_clique() { .unwrap(); tokio::time::sleep(SLEEP).await; - let _token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + let token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); @@ -255,12 +269,18 @@ async fn test_liveliness_fetching_subscriber_clique() { assert_eq!(sample.kind(), SampleKind::Put); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); - drop(token1); + token1.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); assert_eq!(sample.kind(), SampleKind::Delete); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.close().await.unwrap(); + + peer1.close().await.unwrap(); + peer2.close().await.unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -280,7 +300,7 @@ async fn test_liveliness_fetching_subscriber_brokered() { zenoh_util::try_init_log_from_env(); - let _router = { + let router = { let mut c = config::default(); c.listen .endpoints @@ -346,7 +366,7 @@ async fn test_liveliness_fetching_subscriber_brokered() { .unwrap(); 
tokio::time::sleep(SLEEP).await; - let _token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + let token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); @@ -357,10 +377,18 @@ async fn test_liveliness_fetching_subscriber_brokered() { assert_eq!(sample.kind(), SampleKind::Put); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); - drop(token1); + token1.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); assert_eq!(sample.kind(), SampleKind::Delete); assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.close().await.unwrap(); + + router.close().await.unwrap(); + client1.close().await.unwrap(); + client2.close().await.unwrap(); + client3.close().await.unwrap(); } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 4ca924e023..06f44b8bf5 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1238,10 +1238,7 @@ impl Session { pub(crate) fn undeclare_subscriber_inner(&self, sid: Id, kind: SubscriberKind) -> ZResult<()> { let mut state = zwrite!(self.state); - if let Some(sub_state) = state - .subscribers_mut(SubscriberKind::Subscriber) - .remove(&sid) - { + if let Some(sub_state) = state.subscribers_mut(kind).remove(&sid) { trace!("undeclare_subscriber({:?})", sub_state); for res in state .local_resources diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 72dab9bd29..4d964cc1cf 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -65,12 +65,17 @@ async fn test_liveliness_subscriber_clique() { assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); - drop(token); + token.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = 
ztimeout!(sub.recv_async()).unwrap(); assert!(sample.kind() == SampleKind::Delete); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + sub.undeclare().await.unwrap(); + + peer1.close().await.unwrap(); + peer2.close().await.unwrap(); } #[cfg(feature = "unstable")] @@ -114,7 +119,7 @@ async fn test_liveliness_query_clique() { s }; - let _token = ztimeout!(peer1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + let token = ztimeout!(peer1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); tokio::time::sleep(SLEEP).await; let get = ztimeout!(peer2.liveliness().get(LIVELINESS_KEYEXPR)).unwrap(); @@ -123,6 +128,11 @@ async fn test_liveliness_query_clique() { let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap(); assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + token.undeclare().await.unwrap(); + + peer1.close().await.unwrap(); + peer2.close().await.unwrap(); } #[cfg(feature = "unstable")] @@ -141,7 +151,7 @@ async fn test_liveliness_subscriber_brokered() { zenoh_util::try_init_log_from_env(); - let _router = { + let router = { let mut c = config::default(); c.listen .endpoints @@ -190,12 +200,18 @@ async fn test_liveliness_subscriber_brokered() { assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); - drop(token); + token.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); assert!(sample.kind() == SampleKind::Delete); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + sub.undeclare().await.unwrap(); + + router.close().await.unwrap(); + client1.close().await.unwrap(); + client2.close().await.unwrap(); } #[cfg(feature = "unstable")] @@ -213,7 +229,7 @@ async fn test_liveliness_query_brokered() { zenoh_util::try_init_log_from_env(); - let _router = { + let router = { let mut c = config::default(); c.listen .endpoints @@ -252,7 +268,7 @@ async fn 
test_liveliness_query_brokered() { s }; - let _token = ztimeout!(client1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + let token = ztimeout!(client1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); tokio::time::sleep(SLEEP).await; let get = ztimeout!(client2.liveliness().get(LIVELINESS_KEYEXPR)).unwrap(); @@ -261,6 +277,12 @@ async fn test_liveliness_query_brokered() { let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap(); assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + token.undeclare().await.unwrap(); + + router.close().await.unwrap(); + client1.close().await.unwrap(); + client2.close().await.unwrap(); } #[cfg(feature = "unstable")] @@ -295,12 +317,15 @@ async fn test_liveliness_subscriber_local() { assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); - drop(token); + token.undeclare().await.unwrap(); tokio::time::sleep(SLEEP).await; let sample = ztimeout!(sub.recv_async()).unwrap(); assert!(sample.kind() == SampleKind::Delete); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + sub.undeclare().await.unwrap(); + peer.close().await.unwrap(); } #[cfg(feature = "unstable")] @@ -325,7 +350,7 @@ async fn test_liveliness_query_local() { s }; - let _token = ztimeout!(peer.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); + let token = ztimeout!(peer.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap(); tokio::time::sleep(SLEEP).await; let get = ztimeout!(peer.liveliness().get(LIVELINESS_KEYEXPR)).unwrap(); @@ -334,4 +359,7 @@ async fn test_liveliness_query_local() { let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap(); assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR); + + token.undeclare().await.unwrap(); + peer.close().await.unwrap(); } From f394867423cb03e9fa423e6ebb63e3ad0e48f4a1 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz 
Date: Wed, 31 Jul 2024 17:15:23 +0200 Subject: [PATCH 565/598] Rename `from_boxed_string_unchecked` to `from_boxed_str_unchecked` (#1288) --- commons/zenoh-keyexpr/src/key_expr/owned.rs | 4 ++-- zenoh/src/api/key_expr.rs | 8 ++++---- zenoh/src/api/session.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/commons/zenoh-keyexpr/src/key_expr/owned.rs b/commons/zenoh-keyexpr/src/key_expr/owned.rs index 6089df2a1e..517de97658 100644 --- a/commons/zenoh-keyexpr/src/key_expr/owned.rs +++ b/commons/zenoh-keyexpr/src/key_expr/owned.rs @@ -71,13 +71,13 @@ impl OwnedKeyExpr { /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. pub unsafe fn from_string_unchecked(s: String) -> Self { - Self::from_boxed_string_unchecked(s.into_boxed_str()) + Self::from_boxed_str_unchecked(s.into_boxed_str()) } /// Constructs an OwnedKeyExpr without checking [`keyexpr`]'s invariants /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. - pub unsafe fn from_boxed_string_unchecked(s: Box) -> Self { + pub unsafe fn from_boxed_str_unchecked(s: Box) -> Self { OwnedKeyExpr(s.into()) } } diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 81d9aecc20..dbd44a85c6 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -81,10 +81,10 @@ impl KeyExpr<'static> { /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. 
- pub unsafe fn from_boxed_string_unchecked(s: Box) -> Self { - Self(KeyExprInner::Owned( - OwnedKeyExpr::from_boxed_string_unchecked(s), - )) + pub unsafe fn from_boxed_str_unchecked(s: Box) -> Self { + Self(KeyExprInner::Owned(OwnedKeyExpr::from_boxed_str_unchecked( + s, + ))) } } impl<'a> KeyExpr<'a> { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 06f44b8bf5..ed1c75d3f2 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -337,7 +337,7 @@ pub(crate) enum Resource { impl Resource { pub(crate) fn new(name: Box) -> Self { if keyexpr::new(name.as_ref()).is_ok() { - Self::for_keyexpr(unsafe { OwnedKeyExpr::from_boxed_string_unchecked(name) }) + Self::for_keyexpr(unsafe { OwnedKeyExpr::from_boxed_str_unchecked(name) }) } else { Self::Prefix { prefix: name } } From 5d09cf7cb7af1c01611fa7eaeed3b9592baa9374 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 1 Aug 2024 14:39:31 +0200 Subject: [PATCH 566/598] Rework `plugins_loading/search_dirs` config option (#1278) * Rework `plugins_loading/search_dirs` config option * Add license header in `lib_search_dirs.rs` * Address review comment * Remove `LIB_DEFAULT_SEARCH_PATHS` --- Cargo.lock | 9 +- Cargo.toml | 3 +- DEFAULT_CONFIG.json5 | 14 +- commons/zenoh-config/src/lib.rs | 22 +- commons/zenoh-util/Cargo.toml | 2 + commons/zenoh-util/src/lib.rs | 2 + commons/zenoh-util/src/lib_loader.rs | 45 +--- commons/zenoh-util/src/lib_search_dirs.rs | 236 ++++++++++++++++++ plugins/zenoh-backend-traits/Cargo.toml | 1 + plugins/zenoh-backend-traits/src/config.rs | 20 +- .../zenoh-plugin-storage-manager/Cargo.toml | 1 + .../zenoh-plugin-storage-manager/src/lib.rs | 4 +- zenohd/src/main.rs | 7 +- 13 files changed, 293 insertions(+), 73 deletions(-) create mode 100644 commons/zenoh-util/src/lib_search_dirs.rs diff --git a/Cargo.lock b/Cargo.lock index 365a07a00d..9740344616 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1185,9 +1185,9 @@ checksum = 
"bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" @@ -3618,6 +3618,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "763f8cd0d4c71ed8389c90cb8100cba87e763bd01a8e614d4f0af97bcd50a161" dependencies = [ "dyn-clone", + "either", "schemars_derive", "serde", "serde_json", @@ -6001,6 +6002,7 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", + "zenoh-util", "zenoh_backend_traits", ] @@ -6154,6 +6156,8 @@ dependencies = [ "libc", "libloading", "pnet_datalink", + "serde", + "serde_json", "shellexpand", "tokio", "tracing", @@ -6171,6 +6175,7 @@ dependencies = [ "async-trait", "const_format", "derive_more", + "either", "schemars", "serde_json", "zenoh", diff --git a/Cargo.toml b/Cargo.toml index 37e6acd046..047cf35a02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,7 +148,7 @@ rustls-native-certs = "0.7.0" rustls-pemfile = "2.0.0" rustls-webpki = "0.102.0" rustls-pki-types = "1.1.0" -schemars = "0.8.12" +schemars = { version = "0.8.12", features = ["either"] } secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ "derive", @@ -188,6 +188,7 @@ webpki-roots = "0.26.0" winapi = { version = "0.3.9", features = ["iphlpapi"] } x509-parser = "0.16.0" z-serial = "0.2.3" +either = "1.13.0" zenoh-ext = { version = "0.11.0-dev", path = "zenoh-ext" } zenoh-shm = { version = "0.11.0-dev", path = "commons/zenoh-shm" } zenoh-result = { version = "0.11.0-dev", path = "commons/zenoh-result", default-features = false } diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index de5baa4725..c4052313d9 100644 --- 
a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -199,7 +199,7 @@ // /// Id has to be unique within the rule set // "id": "rule1", // "messages": [ - // "put", "delete", "declare_subscriber", + // "put", "delete", "declare_subscriber", // "query", "reply", "declare_queryable", // ], // "flows":["egress","ingress"], @@ -211,7 +211,7 @@ // { // "id": "rule2", // "messages": [ - // "put", "delete", "declare_subscriber", + // "put", "delete", "declare_subscriber", // "query", "reply", "declare_queryable", // ], // "flows":["ingress"], @@ -462,11 +462,15 @@ /// // // plugins_loading: { - // // Enable plugins loading. + // /// Enable plugins loading. // enabled: false, // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup. - // /// If `enabled: true` and `search_dirs` is not specified then `search_dirs` falls back to the default value: ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib" - // search_dirs: [], + // /// Directories are specified as object with fields `kind` and `value` is accepted. + // /// 1. If `kind` is `current_exe_parent`, then the parent of the current executable's directory is searched and `value` should be `null`. + // /// In Bash notation, `{ "kind": "current_exe_parent" }` equals `$(dirname $(which zenohd))` while `"."` equals `$PWD`. + // /// 2. If `kind` is `path`, then `value` is interpreted as a filesystem path. Simply supplying a string instead of a object is equivalent to this. + // /// If `enabled: true` and `search_dirs` is not specified then `search_dirs` falls back to the default value: + // search_dirs: [{ "kind": "current_exe_parent" }, ".", "~/.zenoh/lib", "/opt/homebrew/lib", "/usr/local/lib", "/usr/lib"], // }, // /// Plugins are only loaded if `plugins_loading: { enabled: true }` and present in the configuration when starting. // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index f5fc01aa63..b7b63e1602 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -45,7 +45,7 @@ use zenoh_protocol::{ transport::{BatchSize, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; -use zenoh_util::LibLoader; +use zenoh_util::{LibLoader, LibSearchDirs}; pub mod mode_dependent; pub use mode_dependent::*; @@ -547,7 +547,7 @@ validated_struct::validator! { pub plugins_loading: #[derive(Default)] PluginsLoading { pub enabled: bool, - pub search_dirs: Option>, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) + pub search_dirs: LibSearchDirs, }, #[validated(recursive_accessors)] /// The configuration for plugins. @@ -573,19 +573,6 @@ fn set_false() -> bool { false } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PluginSearchDirs(Vec); -impl Default for PluginSearchDirs { - fn default() -> Self { - Self( - (*zenoh_util::LIB_DEFAULT_SEARCH_PATHS) - .split(':') - .map(|c| c.to_string()) - .collect(), - ) - } -} - #[test] fn config_deser() { let config = Config::from_deserializer( @@ -763,10 +750,7 @@ impl Config { pub fn libloader(&self) -> LibLoader { if self.plugins_loading.enabled { - match self.plugins_loading.search_dirs() { - Some(dirs) => LibLoader::new(dirs, true), - None => LibLoader::default(), - } + LibLoader::new(self.plugins_loading.search_dirs().clone()) } else { LibLoader::empty() } diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index e41433b85f..df99e01385 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -49,6 +49,8 @@ shellexpand = { workspace = true } zenoh-core = { workspace = true } zenoh-result = { workspace = true, features = ["default"] } const_format = { workspace = true } +serde = { workspace = true, features = ["default"] } +serde_json = { workspace = true } [target.'cfg(windows)'.dependencies] winapi = 
{ workspace = true } diff --git a/commons/zenoh-util/src/lib.rs b/commons/zenoh-util/src/lib.rs index 745e790711..a6cf03e5fb 100644 --- a/commons/zenoh-util/src/lib.rs +++ b/commons/zenoh-util/src/lib.rs @@ -21,6 +21,7 @@ use lazy_static::lazy_static; pub mod ffi; mod lib_loader; +pub mod lib_search_dirs; pub mod net; pub mod time_range; @@ -28,6 +29,7 @@ pub use lib_loader::*; pub mod timer; pub use timer::*; pub mod log; +pub use lib_search_dirs::*; pub use log::*; /// The "ZENOH_HOME" environment variable name diff --git a/commons/zenoh-util/src/lib_loader.rs b/commons/zenoh-util/src/lib_loader.rs index 9d4a52c332..082bb04839 100644 --- a/commons/zenoh-util/src/lib_loader.rs +++ b/commons/zenoh-util/src/lib_loader.rs @@ -23,13 +23,13 @@ use tracing::{debug, warn}; use zenoh_core::{zconfigurable, zerror}; use zenoh_result::{bail, ZResult}; +use crate::LibSearchDirs; + zconfigurable! { /// The libraries prefix for the current platform (usually: `"lib"`) pub static ref LIB_PREFIX: String = DLL_PREFIX.to_string(); /// The libraries suffix for the current platform (`".dll"` or `".so"` or `".dylib"`...) pub static ref LIB_SUFFIX: String = DLL_SUFFIX.to_string(); - /// The default list of paths where to search for libraries to load - pub static ref LIB_DEFAULT_SEARCH_PATHS: String = ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib".to_string(); } /// LibLoader allows search for libraries and to load them. @@ -44,40 +44,16 @@ impl LibLoader { LibLoader { search_paths: None } } - /// Returns the list of search paths used by `LibLoader::default()` - pub fn default_search_paths() -> &'static str { - &LIB_DEFAULT_SEARCH_PATHS - } - /// Creates a new [LibLoader] with a set of paths where the libraries will be searched for. /// If `exe_parent_dir`is true, the parent directory of the current executable is also added /// to the set of paths for search. 
- pub fn new(search_dirs: &[S], exe_parent_dir: bool) -> LibLoader - where - S: AsRef, - { - let mut search_paths: Vec = vec![]; - for s in search_dirs { - match shellexpand::full(s) { - Ok(cow_str) => match PathBuf::from(&*cow_str).canonicalize() { - Ok(path) => search_paths.push(path), - Err(err) => debug!("Cannot search for libraries in {}: {}", cow_str, err), - }, - Err(err) => warn!("Cannot search for libraries in '{}': {} ", s.as_ref(), err), - } - } - Self::_new(search_paths, exe_parent_dir) - } - fn _new(mut search_paths: Vec, exe_parent_dir: bool) -> Self { - if exe_parent_dir { - match std::env::current_exe() { - Ok(path) => match path.parent() { - Some(p) => if p.is_dir() { - search_paths.push(p.canonicalize().unwrap()) - }, - None => warn!("Can't search for plugins in executable parent directory: no parent directory for {}.", path.to_string_lossy()), - }, - Err(e) => warn!("Can't search for plugins in executable parent directory: {}.", e), + pub fn new(dirs: LibSearchDirs) -> LibLoader { + let mut search_paths = Vec::new(); + + for path in dirs.into_iter() { + match path { + Ok(path) => search_paths.push(path), + Err(err) => tracing::error!("{err}"), } } @@ -237,7 +213,6 @@ impl LibLoader { impl Default for LibLoader { fn default() -> Self { - let paths: Vec<&str> = (*LIB_DEFAULT_SEARCH_PATHS).split(':').collect(); - LibLoader::new(&paths, true) + LibLoader::new(LibSearchDirs::default()) } } diff --git a/commons/zenoh-util/src/lib_search_dirs.rs b/commons/zenoh-util/src/lib_search_dirs.rs new file mode 100644 index 0000000000..58046ff3a7 --- /dev/null +++ b/commons/zenoh-util/src/lib_search_dirs.rs @@ -0,0 +1,236 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{env, error::Error, fmt::Display, path::PathBuf, str::FromStr}; + +use serde::{ + de::{value::MapAccessDeserializer, Visitor}, + Deserialize, Serialize, +}; + +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[serde(default)] +pub struct LibSearchDirs(Vec); + +impl LibSearchDirs { + pub fn from_paths>(paths: &[T]) -> Self { + Self( + paths + .iter() + .map(|s| LibSearchDir::Path(s.as_ref().to_string())) + .collect(), + ) + } + + pub fn from_specs>(paths: &[T]) -> Result { + let dirs = paths + .iter() + .map(|s| { + let de = &mut serde_json::Deserializer::from_str(s.as_ref()); + LibSearchDir::deserialize(de) + }) + .collect::, _>>()?; + + Ok(Self(dirs)) + } +} + +#[derive(Debug)] +pub struct InvalidLibSearchDir { + found: LibSearchDir, + source: String, +} + +impl Display for InvalidLibSearchDir { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "invalid library search directory `{:?}`: {}", + self.found, self.source + ) + } +} + +impl Error for InvalidLibSearchDir {} + +pub struct IntoIter { + iter: std::vec::IntoIter, +} + +impl Iterator for IntoIter { + type Item = Result; + + fn next(&mut self) -> Option { + self.iter.next().map(LibSearchDir::into_path) + } +} + +impl IntoIterator for LibSearchDirs { + type Item = Result; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { + iter: self.0.into_iter(), + } + } +} + +impl Default for LibSearchDirs { + fn default() -> Self { + LibSearchDirs(vec![ + LibSearchDir::Spec(LibSearchSpec { + kind: LibSearchSpecKind::CurrentExeParent, + value: None, + }), + LibSearchDir::Path(".".to_string()), + LibSearchDir::Path("~/.zenoh/lib".to_string()), + LibSearchDir::Path("/opt/homebrew/lib".to_string()), + LibSearchDir::Path("/usr/local/lib".to_string()), + LibSearchDir::Path("/usr/lib".to_string()), + ]) + } +} + 
+#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub enum LibSearchDir { + Path(String), + Spec(LibSearchSpec), +} + +impl LibSearchDir { + fn into_path(self) -> Result { + match self { + LibSearchDir::Path(path) => LibSearchSpec { + kind: LibSearchSpecKind::Path, + value: Some(path), + } + .into_path(), + LibSearchDir::Spec(spec) => spec.into_path(), + } + } +} + +#[derive(Clone, Debug, Deserialize, Serialize, Eq, Hash, PartialEq)] +#[serde(rename_all = "snake_case")] +pub struct LibSearchSpec { + kind: LibSearchSpecKind, + value: Option, +} + +impl LibSearchSpec { + fn into_path(self) -> Result { + fn error_from_source(spec: &LibSearchSpec, err: T) -> InvalidLibSearchDir { + InvalidLibSearchDir { + found: LibSearchDir::Spec(spec.clone()), + source: err.to_string(), + } + } + + fn error_from_str(spec: &LibSearchSpec, err: &str) -> InvalidLibSearchDir { + InvalidLibSearchDir { + found: LibSearchDir::Spec(spec.clone()), + source: err.to_string(), + } + } + + match self.kind { + LibSearchSpecKind::Path => { + let Some(value) = &self.value else { + return Err(error_from_str( + &self, + "`path` specs should have a `value` field", + )); + }; + + let expanded = + shellexpand::full(value).map_err(|err| error_from_source(&self, err))?; + + let path = + PathBuf::from_str(&expanded).map_err(|err| error_from_source(&self, err))?; + + Ok(path) + } + LibSearchSpecKind::CurrentExeParent => { + let current_exe = + env::current_exe().map_err(|err| error_from_source(&self, err))?; + + let Some(current_exe_parent) = current_exe.parent() else { + return Err(error_from_str( + &self, + "current executable's path has no parent directory", + )); + }; + + let canonicalized = current_exe_parent + .canonicalize() + .map_err(|err| error_from_source(&self, err))?; + + Ok(canonicalized) + } + } + } +} + +#[derive(Clone, Debug, Deserialize, Serialize, Eq, Hash, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum LibSearchSpecKind { + Path, + CurrentExeParent, +} + +impl<'de> 
Deserialize<'de> for LibSearchDir { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_any(LibSearchSpecOrPathVisitor) + } +} + +impl Serialize for LibSearchDir { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + LibSearchDir::Path(path) => serializer.serialize_str(path), + LibSearchDir::Spec(spec) => spec.serialize(serializer), + } + } +} + +struct LibSearchSpecOrPathVisitor; + +impl<'de> Visitor<'de> for LibSearchSpecOrPathVisitor { + type Value = LibSearchDir; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("str or map with field `kind` and optionally field `value`") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + Ok(LibSearchDir::Path(v.to_string())) + } + + fn visit_map(self, map: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + LibSearchSpec::deserialize(MapAccessDeserializer::new(map)).map(LibSearchDir::Spec) + } +} diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index 5997dc5c65..1a574dd118 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml +++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -37,6 +37,7 @@ zenoh-util = { workspace = true } schemars = { workspace = true } zenoh-plugin-trait = { workspace = true } const_format = { workspace = true } +either = { workspace = true } [features] default = [] diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 98167680c8..e440e3014e 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -15,6 +15,7 @@ use std::{convert::TryFrom, time::Duration}; use const_format::concatcp; use derive_more::{AsMut, AsRef}; +use either::Either; use schemars::JsonSchema; use serde_json::{Map, Value}; use zenoh::{ @@ -23,6 +24,7 @@ use zenoh::{ }; use 
zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; +use zenoh_util::LibSearchDirs; #[derive(JsonSchema, Debug, Clone, AsMut, AsRef)] pub struct PluginConfig { @@ -30,7 +32,9 @@ pub struct PluginConfig { pub name: String, #[schemars(with = "Option")] pub required: bool, - pub backend_search_dirs: Option>, + // REVIEW: This is inconsistent with `plugins_loading/search_dirs` + #[schemars(with = "Option, String>>>>")] + pub backend_search_dirs: LibSearchDirs, #[schemars(with = "Map")] pub volumes: Vec, #[schemars(with = "Map")] @@ -161,16 +165,18 @@ impl + AsRef, V: AsObject> TryFrom<(S, &V)> for PluginConfi }) .unwrap_or(Ok(true))?; let backend_search_dirs = match value.get("backend_search_dirs") { - Some(serde_json::Value::String(path)) => Some(vec![path.clone()]), + Some(serde_json::Value::String(path)) => LibSearchDirs::from_paths(&[path.clone()]), Some(serde_json::Value::Array(paths)) => { - let mut result = Vec::with_capacity(paths.len()); + let mut specs = Vec::with_capacity(paths.len()); for path in paths { - let path = if let serde_json::Value::String(path) = path {path} else {bail!("`backend_search_dirs` field of {}'s configuration must be a string or array of strings", name.as_ref())}; - result.push(path.clone()); + let serde_json::Value::String(path) = path else { + bail!("`backend_search_dirs` field of {}'s configuration must be a string or array of strings", name.as_ref()); + }; + specs.push(path.clone()); } - Some(result) + LibSearchDirs::from_specs(&specs)? 
} - None => None, + None => LibSearchDirs::default(), _ => bail!("`backend_search_dirs` field of {}'s configuration must be a string or array of strings", name.as_ref()) }; let volumes = match value.get("volumes") { diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 9ef1846d72..fa7650fcc2 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -54,6 +54,7 @@ zenoh = { workspace = true, features = [ ] } zenoh-plugin-trait = { workspace = true } zenoh_backend_traits = { workspace = true } +zenoh-util = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 3f98725a5e..4043665c5d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -112,9 +112,7 @@ impl StorageRuntimeInner { storages, .. } = config; - let lib_loader = backend_search_dirs - .map(|search_dirs| LibLoader::new(&search_dirs, false)) - .unwrap_or_default(); + let lib_loader = LibLoader::new(backend_search_dirs); let plugins_manager = PluginsManager::dynamic(lib_loader.clone(), BACKEND_LIB_PREFIX) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index d25260d606..9ce0a64333 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -21,6 +21,7 @@ use zenoh::{ config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap, WhatAmI}, Result, }; +use zenoh_util::LibSearchDirs; #[cfg(feature = "loki")] const LOKI_ENDPOINT_VAR: &str = "LOKI_ENDPOINT"; @@ -146,7 +147,11 @@ fn config_from_args(args: &Args) -> Config { if !args.plugin_search_dir.is_empty() { config .plugins_loading - .set_search_dirs(Some(args.plugin_search_dir.clone())) + // REVIEW: Should this append to search_dirs instead? 
As there is no way to pass the new + // `current_exe_parent` unless we change the format of the argument and this overrides + // the one set from the default config. + // Also, --cfg plugins_loading/search_dirs=[...] makes this argument superfluous. + .set_search_dirs(LibSearchDirs::from_paths(&args.plugin_search_dir)) .unwrap(); } for plugin in &args.plugin { From ce4e9bf5dc7a0dbbe3f0ef50255ec960454d0dba Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 2 Aug 2024 11:40:16 +0200 Subject: [PATCH 567/598] Rename `close` to `undeclare` in `zenoh-ext` (#1286) --- zenoh-ext/src/publication_cache.rs | 4 ++-- zenoh-ext/src/querying_subscriber.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index c9b734519f..9c1536c2a1 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -256,9 +256,9 @@ impl<'a> PublicationCache<'a> { }) } - /// Close this PublicationCache + /// Undeclare this [`PublicationCache`]`. #[inline] - pub fn close(self) -> impl Resolve> + 'a { + pub fn undeclare(self) -> impl Resolve> + 'a { ResolveFuture::new(async move { let PublicationCache { _queryable, diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 6134e4d2d7..224abfde87 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -722,9 +722,9 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { Ok(fetch_subscriber) } - /// Close this FetchingSubscriber + /// Undeclare this [`FetchingSubscriber`]`. 
#[inline] - pub fn close(self) -> impl Resolve> + 'a { + pub fn undeclare(self) -> impl Resolve> + 'a { self.subscriber.undeclare() } From 4d337a950b18e56e889ffd7cf9cfd293ae3f5d52 Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Fri, 2 Aug 2024 14:38:15 +0200 Subject: [PATCH 568/598] Rename `close` to `undeclare` in `zenoh-ext-examples` (#1293) --- zenoh-ext/tests/liveliness.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs index 68d4b1b798..637d07ba57 100644 --- a/zenoh-ext/tests/liveliness.rs +++ b/zenoh-ext/tests/liveliness.rs @@ -89,7 +89,7 @@ async fn test_liveliness_querying_subscriber_clique() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); token2.undeclare().await.unwrap(); - sub.close().await.unwrap(); + sub.undeclare().await.unwrap(); peer1.close().await.unwrap(); peer2.close().await.unwrap(); @@ -193,7 +193,7 @@ async fn test_liveliness_querying_subscriber_brokered() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); token2.undeclare().await.unwrap(); - sub.close().await.unwrap(); + sub.undeclare().await.unwrap(); router.close().await.unwrap(); client1.close().await.unwrap(); @@ -277,7 +277,7 @@ async fn test_liveliness_fetching_subscriber_clique() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); token2.undeclare().await.unwrap(); - sub.close().await.unwrap(); + sub.undeclare().await.unwrap(); peer1.close().await.unwrap(); peer2.close().await.unwrap(); @@ -385,7 +385,7 @@ async fn test_liveliness_fetching_subscriber_brokered() { assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); token2.undeclare().await.unwrap(); - sub.close().await.unwrap(); + sub.undeclare().await.unwrap(); router.close().await.unwrap(); client1.close().await.unwrap(); From 8f4779a1f89bfcab4013383a48acb38264d8573c Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Fri, 2 Aug 2024 16:17:42 +0200 Subject: [PATCH 569/598] fix: Fix ZBufWriter 
implementation (#1289) * fix: Fix ZBufWriter implementation * fix: do not expose `ZSliceWriter` * fix: typo * fix: fix `ZBuf::read_u8` * fix: don't add additional empty slice to write on zbuf * refactor: add comment about `ZSlice::writer` being internal --- commons/zenoh-buffers/src/bbuf.rs | 9 +- commons/zenoh-buffers/src/zbuf.rs | 281 ++++++------------------- commons/zenoh-buffers/src/zslice.rs | 227 ++++++++++---------- io/zenoh-transport/src/common/batch.rs | 2 +- zenoh/src/api/bytes.rs | 8 +- 5 files changed, 189 insertions(+), 338 deletions(-) diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index 7af2a1a464..55d341880c 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -185,13 +185,8 @@ impl<'a> HasReader for &'a BBuf { // From impls impl From for ZSlice { fn from(value: BBuf) -> Self { - ZSlice { - buf: Arc::new(value.buffer), - start: 0, - end: value.len, - #[cfg(feature = "shared-memory")] - kind: crate::ZSliceKind::Raw, - } + // SAFETY: buffer length is ensured to be lesser than its capacity + unsafe { ZSlice::new(Arc::new(value.buffer), 0, value.len).unwrap_unchecked() } } } diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index f846280b91..2d5bcca213 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -12,14 +12,12 @@ // ZettaScale Zenoh Team, // use alloc::{sync::Arc, vec::Vec}; -use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; +use core::{cmp, iter, num::NonZeroUsize, ptr::NonNull}; #[cfg(feature = "std")] use std::io; use zenoh_collections::SingleOrVec; -#[cfg(feature = "shared-memory")] -use crate::ZSliceKind; use crate::{ buffer::{Buffer, SplitBuffer}, reader::{ @@ -27,13 +25,9 @@ use crate::{ SiphonableReader, }, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, ZSliceBuffer, + ZSlice, ZSliceBuffer, ZSliceWriter, }; -fn get_mut_unchecked(arc: &mut Arc) -> &mut 
T { - unsafe { &mut (*(Arc::as_ptr(arc) as *mut T)) } -} - #[derive(Debug, Clone, Default, Eq)] pub struct ZBuf { slices: SingleOrVec, @@ -80,85 +74,9 @@ impl ZBuf { } } - pub fn splice>(&mut self, erased: Range, replacement: &[u8]) { - let start = match erased.start_bound() { - core::ops::Bound::Included(n) => *n, - core::ops::Bound::Excluded(n) => n + 1, - core::ops::Bound::Unbounded => 0, - }; - let end = match erased.end_bound() { - core::ops::Bound::Included(n) => n + 1, - core::ops::Bound::Excluded(n) => *n, - core::ops::Bound::Unbounded => self.len(), - }; - if start != end { - self.remove(start, end); - } - self.insert(start, replacement); - } - - fn remove(&mut self, mut start: usize, mut end: usize) { - assert!(start <= end); - assert!(end <= self.len()); - let mut start_slice_idx = 0; - let mut start_idx_in_start_slice = 0; - let mut end_slice_idx = 0; - let mut end_idx_in_end_slice = 0; - for (i, slice) in self.slices.as_mut().iter_mut().enumerate() { - if slice.len() > start { - start_slice_idx = i; - start_idx_in_start_slice = start; - } - if slice.len() >= end { - end_slice_idx = i; - end_idx_in_end_slice = end; - break; - } - start -= slice.len(); - end -= slice.len(); - } - let start_slice = &mut self.slices.as_mut()[start_slice_idx]; - start_slice.end = start_slice.start + start_idx_in_start_slice; - let drain_start = start_slice_idx + (start_slice.start < start_slice.end) as usize; - let end_slice = &mut self.slices.as_mut()[end_slice_idx]; - end_slice.start += end_idx_in_end_slice; - let drain_end = end_slice_idx + (end_slice.start >= end_slice.end) as usize; - self.slices.drain(drain_start..drain_end); - } - - fn insert(&mut self, mut at: usize, slice: &[u8]) { - if slice.is_empty() { - return; - } - let old_at = at; - let mut slice_index = usize::MAX; - for (i, slice) in self.slices.as_ref().iter().enumerate() { - if at < slice.len() { - slice_index = i; - break; - } - if let Some(new_at) = at.checked_sub(slice.len()) { - at = new_at - } else 
{ - panic!( - "Out of bounds insert attempted: at={old_at}, len={}", - self.len() - ) - } - } - if at != 0 { - let split = &self.slices.as_ref()[slice_index]; - let (l, r) = ( - split.subslice(0, at).unwrap(), - split.subslice(at, split.len()).unwrap(), - ); - self.slices.drain(slice_index..(slice_index + 1)); - self.slices.insert(slice_index, l); - self.slices.insert(slice_index + 1, Vec::from(slice).into()); - self.slices.insert(slice_index + 2, r); - } else { - self.slices.insert(slice_index, Vec::from(slice).into()) - } + #[inline] + fn opt_zslice_writer(&mut self) -> Option { + self.slices.last_mut().and_then(|s| s.writer()) } } @@ -319,7 +237,7 @@ impl<'a> Reader for ZBufReader<'a> { fn read_u8(&mut self) -> Result { let slice = self.inner.slices.get(self.cursor.slice).ok_or(DidntRead)?; - let byte = slice[self.cursor.byte]; + let byte = *slice.get(self.cursor.byte).ok_or(DidntRead)?; self.cursor.byte += 1; if self.cursor.byte == slice.len() { self.cursor.slice += 1; @@ -359,10 +277,7 @@ impl<'a> Reader for ZBufReader<'a> { Ok(buffer.into()) } cmp::Ordering::Equal => { - let s = slice - .subslice(self.cursor.byte, slice.len()) - .ok_or(DidntRead)?; - + let s = slice.subslice(self.cursor.byte..).ok_or(DidntRead)?; self.cursor.slice += 1; self.cursor.byte = 0; Ok(s) @@ -370,7 +285,7 @@ impl<'a> Reader for ZBufReader<'a> { cmp::Ordering::Greater => { let start = self.cursor.byte; self.cursor.byte += len; - slice.subslice(start, self.cursor.byte).ok_or(DidntRead) + slice.subslice(start..self.cursor.byte).ok_or(DidntRead) } } } @@ -530,14 +445,14 @@ impl Iterator for ZBufSliceIterator<'_, '_> { match self.remaining.cmp(&len) { cmp::Ordering::Less => { let end = start + self.remaining; - let slice = slice.subslice(start, end); + let slice = slice.subslice(start..end); self.reader.cursor.byte = end; self.remaining = 0; slice } cmp::Ordering::Equal => { let end = start + self.remaining; - let slice = slice.subslice(start, end); + let slice = 
slice.subslice(start..end); self.reader.cursor.slice += 1; self.reader.cursor.byte = 0; self.remaining = 0; @@ -545,7 +460,7 @@ impl Iterator for ZBufSliceIterator<'_, '_> { } cmp::Ordering::Greater => { let end = start + len; - let slice = slice.subslice(start, end); + let slice = slice.subslice(start..end); self.reader.cursor.slice += 1; self.reader.cursor.byte = 0; self.remaining -= len; @@ -562,78 +477,43 @@ impl Iterator for ZBufSliceIterator<'_, '_> { // Writer #[derive(Debug)] pub struct ZBufWriter<'a> { - inner: &'a mut ZBuf, - cache: Arc>, + inner: NonNull, + zslice_writer: Option>, +} + +impl<'a> ZBufWriter<'a> { + #[inline] + fn zslice_writer(&mut self) -> &mut ZSliceWriter<'a> { + // Cannot use `if let` because of https://github.com/rust-lang/rust/issues/54663 + if self.zslice_writer.is_some() { + return self.zslice_writer.as_mut().unwrap(); + } + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow + let zbuf = unsafe { self.inner.as_mut() }; + zbuf.slices.push(ZSlice::empty()); + self.zslice_writer = zbuf.slices.last_mut().unwrap().writer(); + self.zslice_writer.as_mut().unwrap() + } } impl<'a> HasWriter for &'a mut ZBuf { type Writer = ZBufWriter<'a>; fn writer(self) -> Self::Writer { - let mut cache = None; - if let Some(ZSlice { buf, end, .. 
}) = self.slices.last_mut() { - // Verify the ZSlice is actually a Vec - if let Some(b) = buf.as_any().downcast_ref::>() { - // Check for the length - if *end == b.len() { - cache = Some(unsafe { Arc::from_raw(Arc::into_raw(buf.clone()).cast()) }) - } - } - } - ZBufWriter { - inner: self, - cache: cache.unwrap_or_else(|| Arc::new(Vec::new())), + inner: NonNull::new(self).unwrap(), + zslice_writer: self.opt_zslice_writer(), } } } impl Writer for ZBufWriter<'_> { fn write(&mut self, bytes: &[u8]) -> Result { - let Some(len) = NonZeroUsize::new(bytes.len()) else { - return Err(DidntWrite); - }; - self.write_exact(bytes)?; - Ok(len) + self.zslice_writer().write(bytes) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { - let cache = get_mut_unchecked(&mut self.cache); - let prev_cache_len = cache.len(); - cache.extend_from_slice(bytes); - let cache_len = cache.len(); - - // Verify we are writing on the cache - if let Some(ZSlice { - buf, ref mut end, .. - }) = self.inner.slices.last_mut() - { - // Verify the previous length of the cache is the right one - if *end == prev_cache_len { - // Verify the ZSlice is actually a Vec - if let Some(b) = buf.as_any().downcast_ref::>() { - // Verify the Vec of the ZSlice is exactly the one from the cache - if core::ptr::eq(cache.as_ptr(), b.as_ptr()) { - // Simply update the slice length - *end = cache_len; - return Ok(()); - } - } - } - } - - self.inner.slices.push(ZSlice { - buf: self.cache.clone(), - start: prev_cache_len, - end: cache_len, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - }); - Ok(()) - } - - fn write_u8(&mut self, byte: u8) -> Result<(), DidntWrite> { - self.write_exact(core::slice::from_ref(&byte)) + self.zslice_writer().write_exact(bytes) } fn remaining(&self) -> usize { @@ -641,55 +521,19 @@ impl Writer for ZBufWriter<'_> { } fn write_zslice(&mut self, slice: &ZSlice) -> Result<(), DidntWrite> { - self.inner.slices.push(slice.clone()); + self.zslice_writer = None; + // SAFETY: 
`self.inner` is valid as guaranteed by `self.writer` borrow, + // and `self.writer` has been overwritten + unsafe { self.inner.as_mut() }.push_zslice(slice.clone()); Ok(()) } - unsafe fn with_slot(&mut self, mut len: usize, write: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { - let cache = get_mut_unchecked(&mut self.cache); - let prev_cache_len = cache.len(); - cache.reserve(len); - - // SAFETY: we already reserved len elements on the vector. - let s = crate::unsafe_slice_mut!(cache.spare_capacity_mut(), ..len); - // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. - // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { write(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; - // SAFETY: we already reserved len elements on the vector. - unsafe { cache.set_len(prev_cache_len + len) }; - - let cache_len = cache.len(); - - // Verify we are writing on the cache - if let Some(ZSlice { - buf, ref mut end, .. 
- }) = self.inner.slices.last_mut() - { - // Verify the previous length of the cache is the right one - if *end == prev_cache_len { - // Verify the ZSlice is actually a Vec - if let Some(b) = buf.as_any().downcast_ref::>() { - // Verify the Vec of the ZSlice is exactly the one from the cache - if ptr::eq(cache.as_ptr(), b.as_ptr()) { - // Simply update the slice length - *end = cache_len; - return NonZeroUsize::new(len).ok_or(DidntWrite); - } - } - } - } - - self.inner.slices.push(ZSlice { - buf: self.cache.clone(), - start: prev_cache_len, - end: cache_len, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - }); - NonZeroUsize::new(len).ok_or(DidntWrite) + // SAFETY: same precondition as the enclosing function + self.zslice_writer().with_slot(len, write) } } @@ -697,22 +541,25 @@ impl BacktrackableWriter for ZBufWriter<'_> { type Mark = ZBufPos; fn mark(&mut self) -> Self::Mark { - if let Some(slice) = self.inner.slices.last() { - ZBufPos { - slice: self.inner.slices.len(), - byte: slice.end, - } - } else { - ZBufPos { slice: 0, byte: 0 } + let byte = self.zslice_writer.as_mut().map(|w| w.mark()); + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow + let zbuf = unsafe { self.inner.as_mut() }; + ZBufPos { + slice: zbuf.slices.len(), + byte: byte + .or_else(|| Some(zbuf.opt_zslice_writer()?.mark())) + .unwrap_or(0), } } fn rewind(&mut self, mark: Self::Mark) -> bool { - self.inner - .slices - .truncate(mark.slice + usize::from(mark.byte != 0)); - if let Some(slice) = self.inner.slices.last_mut() { - slice.end = mark.byte; + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow, + // and `self.writer` is reassigned after modification + let zbuf = unsafe { self.inner.as_mut() }; + zbuf.slices.truncate(mark.slice); + self.zslice_writer = zbuf.opt_zslice_writer(); + if let Some(writer) = &mut self.zslice_writer { + writer.rewind(mark.byte); } true } @@ -752,24 +599,24 @@ mod tests { let slice: ZSlice = [0u8, 1, 2, 3, 4, 
5, 6, 7].to_vec().into(); let mut zbuf1 = ZBuf::empty(); - zbuf1.push_zslice(slice.subslice(0, 4).unwrap()); - zbuf1.push_zslice(slice.subslice(4, 8).unwrap()); + zbuf1.push_zslice(slice.subslice(..4).unwrap()); + zbuf1.push_zslice(slice.subslice(4..8).unwrap()); let mut zbuf2 = ZBuf::empty(); - zbuf2.push_zslice(slice.subslice(0, 1).unwrap()); - zbuf2.push_zslice(slice.subslice(1, 4).unwrap()); - zbuf2.push_zslice(slice.subslice(4, 8).unwrap()); + zbuf2.push_zslice(slice.subslice(..1).unwrap()); + zbuf2.push_zslice(slice.subslice(1..4).unwrap()); + zbuf2.push_zslice(slice.subslice(4..8).unwrap()); assert_eq!(zbuf1, zbuf2); let mut zbuf1 = ZBuf::empty(); - zbuf1.push_zslice(slice.subslice(2, 4).unwrap()); - zbuf1.push_zslice(slice.subslice(4, 8).unwrap()); + zbuf1.push_zslice(slice.subslice(2..4).unwrap()); + zbuf1.push_zslice(slice.subslice(4..8).unwrap()); let mut zbuf2 = ZBuf::empty(); - zbuf2.push_zslice(slice.subslice(2, 3).unwrap()); - zbuf2.push_zslice(slice.subslice(3, 6).unwrap()); - zbuf2.push_zslice(slice.subslice(6, 8).unwrap()); + zbuf2.push_zslice(slice.subslice(2..3).unwrap()); + zbuf2.push_zslice(slice.subslice(3..6).unwrap()); + zbuf2.push_zslice(slice.subslice(6..8).unwrap()); assert_eq!(zbuf1, zbuf2); } diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 6ed404eb78..ec77ffa770 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -14,16 +14,15 @@ use alloc::{boxed::Box, sync::Arc, vec::Vec}; use core::{ any::Any, - convert::AsRef, - fmt, + fmt, iter, num::NonZeroUsize, - ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}, - option, + ops::{Bound, Deref, RangeBounds}, }; use crate::{ buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, HasReader, Reader}, + writer::{BacktrackableWriter, DidntWrite, Writer}, }; /*************************************/ @@ -37,7 +36,7 @@ pub trait ZSliceBuffer: Any + Send + Sync 
+ fmt::Debug { impl ZSliceBuffer for Vec { fn as_slice(&self) -> &[u8] { - self.as_ref() + self } fn as_any(&self) -> &dyn Any { @@ -51,7 +50,7 @@ impl ZSliceBuffer for Vec { impl ZSliceBuffer for Box<[u8]> { fn as_slice(&self) -> &[u8] { - self.as_ref() + self } fn as_any(&self) -> &dyn Any { @@ -65,7 +64,7 @@ impl ZSliceBuffer for Box<[u8]> { impl ZSliceBuffer for [u8; N] { fn as_slice(&self) -> &[u8] { - self.as_ref() + self } fn as_any(&self) -> &dyn Any { @@ -91,9 +90,9 @@ pub enum ZSliceKind { /// A clonable wrapper to a contiguous slice of bytes. #[derive(Clone)] pub struct ZSlice { - pub(crate) buf: Arc, - pub(crate) start: usize, - pub(crate) end: usize, + buf: Arc, + start: usize, + end: usize, #[cfg(feature = "shared-memory")] pub kind: ZSliceKind, } @@ -108,58 +107,62 @@ impl ZSlice { Self::new(buf, start, end) } + #[inline] pub fn new( buf: Arc, start: usize, end: usize, ) -> Result> { if start <= end && end <= buf.as_slice().len() { - // unsafe: this operation is safe because we just checked the slice boundaries - Ok(unsafe { ZSlice::new_unchecked(buf, start, end) }) + Ok(Self { + buf, + start, + end, + #[cfg(feature = "shared-memory")] + kind: ZSliceKind::Raw, + }) } else { Err(buf) } } + #[inline] pub fn empty() -> Self { - Self::new(Arc::new([]), 0, 0).unwrap() - } - - /// # Safety - /// This function does not verify whether the `start` and `end` indexes are within the buffer boundaries. - /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. 
- pub unsafe fn new_unchecked(buf: Arc, start: usize, end: usize) -> Self { - ZSlice { - buf, - start, - end, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - } + Self::new(Arc::new(Vec::::new()), 0, 0).unwrap() } #[inline] #[must_use] - pub fn downcast_ref(&self) -> Option<&T> - where - T: Any, - { - self.buf.as_any().downcast_ref::() + pub fn downcast_ref(&self) -> Option<&T> { + self.buf.as_any().downcast_ref() } + /// # Safety + /// + /// Buffer modification must not modify slice range. #[inline] #[must_use] - pub fn downcast_mut(&mut self) -> Option<&mut T> - where - T: Any, - { - Arc::get_mut(&mut self.buf).and_then(|val| val.as_any_mut().downcast_mut::()) + pub unsafe fn downcast_mut(&mut self) -> Option<&mut T> { + Arc::get_mut(&mut self.buf)?.as_any_mut().downcast_mut() } + // This method is internal and is only meant to be used in `ZBufWriter`. + // It's implemented in this module because it plays with `ZSlice` invariant, + // so it should stay in the same module. + // See https://github.com/eclipse-zenoh/zenoh/pull/1289#discussion_r1701796640 #[inline] - #[must_use] - pub const fn range(&self) -> Range { - self.start..self.end + pub(crate) fn writer(&mut self) -> Option { + let vec = Arc::get_mut(&mut self.buf)? + .as_any_mut() + .downcast_mut::>()?; + if self.end == vec.len() { + Some(ZSliceWriter { + vec, + end: &mut self.end, + }) + } else { + None + } } #[inline] @@ -178,11 +181,20 @@ impl ZSlice { #[must_use] pub fn as_slice(&self) -> &[u8] { // SAFETY: bounds checks are performed at `ZSlice` construction via `make()` or `subslice()`. 
- crate::unsafe_slice!(self.buf.as_slice(), self.range()) - } - - #[must_use] - pub fn subslice(&self, start: usize, end: usize) -> Option { + unsafe { self.buf.as_slice().get_unchecked(self.start..self.end) } + } + + pub fn subslice(&self, range: impl RangeBounds) -> Option { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + 1, + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(&n) => n + 1, + Bound::Excluded(&n) => n, + Bound::Unbounded => self.len(), + }; if start <= end && end <= self.len() { Some(ZSlice { buf: self.buf.clone(), @@ -211,65 +223,9 @@ impl AsRef<[u8]> for ZSlice { } } -impl Index for ZSlice { - type Output = u8; - - fn index(&self, index: usize) -> &Self::Output { - &self.buf.as_slice()[self.start + index] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: Range) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeFrom) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index for ZSlice { - type Output = [u8]; - - fn index(&self, _range: RangeFull) -> &Self::Output { - self - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeInclusive) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeTo) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeToInclusive) -> &Self::Output { - &(self.deref())[range] - } -} - -impl PartialEq for ZSlice { - fn eq(&self, other: &Self) -> bool { - self.as_slice() == other.as_slice() +impl + ?Sized> PartialEq for ZSlice { + fn eq(&self, other: &Rhs) -> bool { + self.as_slice() == other.as_ref() } } @@ -334,10 +290,57 @@ impl Buffer for &mut ZSlice { // SplitBuffer impl SplitBuffer for ZSlice { - type Slices<'a> = 
option::IntoIter<&'a [u8]>; + type Slices<'a> = iter::Once<&'a [u8]>; fn slices(&self) -> Self::Slices<'_> { - Some(self.as_slice()).into_iter() + iter::once(self.as_slice()) + } +} + +#[derive(Debug)] +pub(crate) struct ZSliceWriter<'a> { + vec: &'a mut Vec, + end: &'a mut usize, +} + +impl Writer for ZSliceWriter<'_> { + fn write(&mut self, bytes: &[u8]) -> Result { + let len = self.vec.write(bytes)?; + *self.end += len.get(); + Ok(len) + } + + fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { + self.write(bytes).map(|_| ()) + } + + fn remaining(&self) -> usize { + self.vec.remaining() + } + + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result + where + F: FnOnce(&mut [u8]) -> usize, + { + // SAFETY: same precondition as the enclosing function + let len = unsafe { self.vec.with_slot(len, write) }?; + *self.end += len.get(); + Ok(len) + } +} + +impl BacktrackableWriter for ZSliceWriter<'_> { + type Mark = usize; + + fn mark(&mut self) -> Self::Mark { + *self.end + } + + fn rewind(&mut self, mark: Self::Mark) -> bool { + assert!(mark <= self.vec.len()); + self.vec.truncate(mark); + *self.end = mark; + true } } @@ -354,6 +357,7 @@ impl Reader for &mut ZSlice { fn read(&mut self, into: &mut [u8]) -> Result { let mut reader = self.as_slice().reader(); let len = reader.read(into)?; + // we trust `Reader` impl for `&[u8]` to not overflow the size of the slice self.start += len.get(); Ok(len) } @@ -361,6 +365,7 @@ impl Reader for &mut ZSlice { fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { let mut reader = self.as_slice().reader(); reader.read_exact(into)?; + // we trust `Reader` impl for `&[u8]` to not overflow the size of the slice self.start += into.len(); Ok(()) } @@ -368,6 +373,7 @@ impl Reader for &mut ZSlice { fn read_u8(&mut self) -> Result { let mut reader = self.as_slice().reader(); let res = reader.read_u8()?; + // we trust `Reader` impl for `&[u8]` to not overflow the size of the slice self.start += 1; 
Ok(res) } @@ -379,7 +385,7 @@ impl Reader for &mut ZSlice { } fn read_zslice(&mut self, len: usize) -> Result { - let res = self.subslice(0, len).ok_or(DidntRead)?; + let res = self.subslice(..len).ok_or(DidntRead)?; self.start += len; Ok(res) } @@ -401,6 +407,7 @@ impl BacktrackableReader for &mut ZSlice { } fn rewind(&mut self, mark: Self::Mark) -> bool { + assert!(mark <= self.end); self.start = mark; true } @@ -419,8 +426,8 @@ impl std::io::Read for &mut ZSlice { } } +#[cfg(feature = "test")] impl ZSlice { - #[cfg(feature = "test")] pub fn rand(len: usize) -> Self { use rand::Rng; @@ -439,10 +446,10 @@ mod tests { let mut zslice: ZSlice = buf.clone().into(); assert_eq!(buf.as_slice(), zslice.as_slice()); - let range = zslice.range(); - let mut_slice = zslice.downcast_mut::>().unwrap(); + // SAFETY: buffer slize size is not modified + let mut_slice = unsafe { zslice.downcast_mut::>() }.unwrap(); - mut_slice[range][..buf.len()].clone_from_slice(&buf[..]); + mut_slice[..buf.len()].clone_from_slice(&buf[..]); assert_eq!(buf.as_slice(), zslice.as_slice()); } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 94b03b0514..5537ec46fb 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -443,7 +443,7 @@ impl RBatch { self.buffer = self .buffer - .subslice(l.len() + h.len(), self.buffer.len()) + .subslice(l.len() + h.len()..self.buffer.len()) .ok_or_else(|| zerror!("Invalid batch length"))?; Ok(()) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 1a0935f846..27754b081a 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1157,7 +1157,7 @@ macro_rules! 
impl_int { // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 // - end is a valid end index because is bounded between 0 and bs.len() - ZBytes::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) + ZBytes::new(unsafe { ZSlice::new(Arc::new(bs), 0, end).unwrap_unchecked() }) } } @@ -2257,7 +2257,8 @@ impl<'a> Deserialize<&'a mut zshm> for ZSerde { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { + // SAFETY: ShmBufInner cannot change the size of the slice + if let Some(shmb) = unsafe { zs.downcast_mut::() } { return Ok(shmb.into()); } } @@ -2274,7 +2275,8 @@ impl<'a> Deserialize<&'a mut zshmmut> for ZSerde { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { + // SAFETY: ShmBufInner cannot change the size of the slice + if let Some(shmb) = unsafe { zs.downcast_mut::() } { return shmb.try_into().map_err(|_| ZDeserializeError); } } From df357ae7786155103c4befbaf152781240ecf2c6 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 5 Aug 2024 10:30:38 +0200 Subject: [PATCH 570/598] Renaming --- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index c9872413ea..dffdb4e1de 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -208,7 +208,7 @@ fn declare_simple_subscription( } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -222,7 +222,7 @@ fn client_subs(res: &Arc) -> Vec> { } #[inline] -fn remote_client_subs(res: &Arc, face: &Arc) -> bool { +fn 
remote_simple_subs(res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) @@ -260,7 +260,7 @@ fn propagate_forget_simple_subscription( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, &face)) + .is_some_and(|m| m.context.is_some() && remote_simple_subs(&m, &face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { send_declare( @@ -296,13 +296,13 @@ pub(super) fn undeclare_simple_subscription( get_mut_unchecked(ctx).subs = None; } - let mut client_subs = client_subs(res); - if client_subs.is_empty() { + let mut simple_subs = simple_subs(res); + if simple_subs.is_empty() { propagate_forget_simple_subscription(tables, res, send_declare); } - if client_subs.len() == 1 { - let mut face = &mut client_subs[0]; + if simple_subs.len() == 1 { + let mut face = &mut simple_subs[0]; if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { send_declare( &face.primitives, @@ -329,7 +329,7 @@ pub(super) fn undeclare_simple_subscription( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, face)) + .is_some_and(|m| m.context.is_some() && remote_simple_subs(&m, face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { send_declare( From b1e4dba77ad1af2991c383ffa3aded3f968455ff Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Mon, 5 Aug 2024 18:19:08 +0800 Subject: [PATCH 571/598] feat: use the bit manipulation to compute the alignment size (#1206) * feat: use the bit manipulation to compute the alignment size * fixup! 
feat: use the bit manipulation to compute the alignment size * chore: prove the alignment size calculation * chore: make the notations consistent * chore: typos * chore: reuse the computation * ci: trigger --- commons/zenoh-shm/src/api/provider/types.rs | 61 +++++++++++++++------ 1 file changed, 44 insertions(+), 17 deletions(-) diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index bb04dfa5fc..71d3753e26 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -52,7 +52,7 @@ impl Display for AllocAlignment { impl Default for AllocAlignment { fn default() -> Self { Self { - pow: (std::mem::align_of::() as f64).log2().round() as u8, + pow: std::mem::align_of::().ilog2() as _, } } } @@ -65,9 +65,10 @@ impl AllocAlignment { /// This function will return an error if provided alignment power cannot fit into usize. #[zenoh_macros::unstable_doc] pub const fn new(pow: u8) -> Result { - match pow { - pow if pow < usize::BITS as u8 => Ok(Self { pow }), - _ => Err(ZLayoutError::IncorrectLayoutArgs), + if pow < usize::BITS as u8 { + Ok(Self { pow }) + } else { + Err(ZLayoutError::IncorrectLayoutArgs) } } @@ -92,19 +93,45 @@ impl AllocAlignment { /// ``` #[zenoh_macros::unstable_doc] pub fn align_size(&self, size: NonZeroUsize) -> NonZeroUsize { - let alignment = self.get_alignment_value(); - match size.get() % alignment { - 0 => size, - // SAFETY: - // This unsafe block is always safe: - // 1. 0 < remainder < alignment - // 2. because of 1, the value of (alignment.get() - remainder) is always > 0 - // 3. because of 2, we add nonzero size to nonzero (alignment.get() - remainder) and it is always positive if no overflow - // 4. 
we make sure that there is no overflow condition in 3 by means of alignment limitation in `new` by limiting pow value - remainder => unsafe { - NonZeroUsize::new_unchecked(size.get() + (alignment.get() - remainder)) - }, - } + // Notations: + // - size to align S + // - usize::BITS B + // - pow P where 0 ≤ P < B + // - alignment value A = 2^P + // - return R = min{x | x ≥ S, x % A = 0} + // + // Example 1: A = 4 = (00100)₂, S = 4 = (00100)₂ ⇒ R = 4 = (00100)₂ + // Example 2: A = 4 = (00100)₂, S = 7 = (00111)₂ ⇒ R = 8 = (01000)₂ + // Example 3: A = 4 = (00100)₂, S = 8 = (01000)₂ ⇒ R = 8 = (01000)₂ + // Example 4: A = 4 = (00100)₂, S = 9 = (01001)₂ ⇒ R = 12 = (01100)₂ + // + // Algorithm: For any x = (bₙ, ⋯, b₂, b₁)₂ in binary representation, + // 1. x % A = 0 ⇔ ∀i < P, bᵢ = 0 + // 2. f(x) ≜ x & !(A-1) leads to ∀i < P, bᵢ = 0, hence f(x) % A = 0 + // (i.e. f zeros all bits before the P-th bit) + // 3. R = min{x | x ≥ S, x % A = 0} is equivalent to find the unique R where S ≤ R < S+A and R % A = 0 + // 4. x-A < f(x) ≤ x ⇒ S-1 < f(S+A-1) ≤ S+A-1 ⇒ S ≤ f(S+A-1) < S+A + // + // Hence R = f(S+A-1) = (S+(A-1)) & !(A-1) is the desired value + + // Compute A - 1 = 2^P - 1 + let a_minus_1 = self.get_alignment_value().get() - 1; + + // Overflow check: ensure S ≤ 2^B - 2^P = (2^B - 1) - (A - 1) + // so that R < S+A ≤ 2^B and hence it's a valid usize + let bound = usize::MAX - a_minus_1; + assert!( + size.get() <= bound, + "The given size {} exceeded the maximum {}", + size.get(), + bound + ); + + // Overflow never occurs due to the check above + let r = (size.get() + a_minus_1) & !a_minus_1; + + // SAFETY: R ≥ 0 since R ≥ S ≥ 0 + unsafe { NonZeroUsize::new_unchecked(r) } } } From b7d42efee8bd043ef6a955d03e2b39c181c51b09 Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Tue, 6 Aug 2024 15:41:43 +0800 Subject: [PATCH 572/598] Use tokio to replace async_std for plugins and backends. (#1264) * Use tokio to replace async_std for plugins and backends. 
Signed-off-by: ChenYing Kuo * Incorporate the review comments. Signed-off-by: ChenYing Kuo * Support thread number config for tokio runtime. Signed-off-by: ChenYing Kuo --------- Signed-off-by: ChenYing Kuo --- Cargo.lock | 22 ++---- Cargo.toml | 1 - DEFAULT_CONFIG.json5 | 6 ++ commons/zenoh-util/Cargo.toml | 1 - commons/zenoh-util/src/timer.rs | 52 +++++++------ .../zenoh-link-vsock/src/unicast.rs | 2 +- plugins/zenoh-backend-example/Cargo.toml | 2 +- plugins/zenoh-backend-example/src/lib.rs | 2 +- plugins/zenoh-backend-traits/Cargo.toml | 1 - plugins/zenoh-plugin-example/Cargo.toml | 3 +- plugins/zenoh-plugin-example/src/lib.rs | 36 +++++++-- plugins/zenoh-plugin-rest/Cargo.toml | 2 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- plugins/zenoh-plugin-rest/src/config.rs | 14 ++++ plugins/zenoh-plugin-rest/src/lib.rs | 78 +++++++++++++------ .../zenoh-plugin-storage-manager/Cargo.toml | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 49 ++++++++---- .../src/memory_backend/mod.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 3 +- .../src/replica/mod.rs | 8 +- .../src/replica/snapshotter.rs | 12 +-- .../src/replica/storage.rs | 3 +- .../src/storages_mgt.rs | 7 +- .../tests/operations.rs | 10 ++- .../tests/wildcard.rs | 14 +--- 26 files changed, 210 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9740344616..dc465ce342 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -274,16 +274,6 @@ dependencies = [ "syn 2.0.52", ] -[[package]] -name = "async-attributes" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "async-channel" version = "1.9.0" @@ -439,7 +429,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" 
dependencies = [ - "async-attributes", "async-channel", "async-global-executor", "async-io", @@ -5523,12 +5512,12 @@ dependencies = [ name = "zenoh-backend-example" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "const_format", "futures", "git-version", "serde_json", + "tokio", "tracing", "zenoh", "zenoh-plugin-trait", @@ -5942,11 +5931,12 @@ dependencies = [ name = "zenoh-plugin-example" version = "0.11.0-dev" dependencies = [ - "async-std", "const_format", "futures", "git-version", + "lazy_static", "serde_json", + "tokio", "tracing", "zenoh", "zenoh-plugin-trait", @@ -5958,7 +5948,6 @@ name = "zenoh-plugin-rest" version = "0.11.0-dev" dependencies = [ "anyhow", - "async-std", "base64 0.22.1", "clap", "const_format", @@ -5973,6 +5962,7 @@ dependencies = [ "serde", "serde_json", "tide", + "tokio", "tracing", "zenoh", "zenoh-plugin-trait", @@ -5983,7 +5973,6 @@ name = "zenoh-plugin-storage-manager" version = "0.11.0-dev" dependencies = [ "async-global-executor", - "async-std", "async-trait", "const_format", "crc", @@ -5998,6 +5987,7 @@ dependencies = [ "schemars", "serde", "serde_json", + "tokio", "tracing", "urlencoding", "zenoh", @@ -6146,7 +6136,6 @@ dependencies = [ name = "zenoh-util" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "const_format", "flume", @@ -6171,7 +6160,6 @@ dependencies = [ name = "zenoh_backend_traits" version = "0.11.0-dev" dependencies = [ - "async-std", "async-trait", "const_format", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index 047cf35a02..034d059862 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,6 @@ anyhow = { version = "1.0.69", default-features = false } # Default features are async-executor = "1.5.0" async-global-executor = "2.3.1" async-io = "2.3.3" -async-std = { version = "=1.12.0", default-features = false } # Default features are disabled due to some crates' requirements async-trait = "0.1.60" base64 = "0.22.1" bincode = "1.3.3" diff --git a/DEFAULT_CONFIG.json5 
b/DEFAULT_CONFIG.json5 index c4052313d9..893a1930a5 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -493,6 +493,12 @@ // __config__: "./plugins/zenoh-plugin-rest/config.json5", // /// http port to answer to rest requests // http_port: 8000, + // /// The number of worker thread in TOKIO runtime (default: 2) + // /// The configuration only takes effect if running as a dynamic plugin, which can not reuse the current runtime. + // work_thread_num: 0, + // /// The number of blocking thread in TOKIO runtime (default: 50) + // /// The configuration only takes effect if running as a dynamic plugin, which can not reuse the current runtime. + // max_block_thread_num: 50, // }, // // /// Configure the storage manager plugin diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index df99e01385..38694dc1b5 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -35,7 +35,6 @@ maintenance = { status = "actively-developed" } test = [] [dependencies] -async-std = { workspace = true, features = ["default", "unstable"] } tokio = { workspace = true, features = ["time", "net"] } async-trait = { workspace = true } flume = { workspace = true } diff --git a/commons/zenoh-util/src/timer.rs b/commons/zenoh-util/src/timer.rs index 7fd059b0cf..ab52c0c996 100644 --- a/commons/zenoh-util/src/timer.rs +++ b/commons/zenoh-util/src/timer.rs @@ -21,9 +21,9 @@ use std::{ time::{Duration, Instant}, }; -use async_std::{prelude::*, sync::Mutex, task}; use async_trait::async_trait; use flume::{bounded, Receiver, RecvError, Sender}; +use tokio::{runtime::Handle, select, sync::Mutex, task, time}; use zenoh_core::zconfigurable; zconfigurable! 
{ @@ -120,7 +120,7 @@ async fn timer_task( let mut events = events.lock().await; loop { - // Fuuture for adding new events + // Future for adding new events let new = new_event.recv_async(); match events.peek() { @@ -130,12 +130,17 @@ async fn timer_task( let next = next.clone(); let now = Instant::now(); if next.when > now { - task::sleep(next.when - now).await; + time::sleep(next.when - now).await; } Ok((false, next)) }; - match new.race(wait).await { + let result = select! { + result = wait => { result }, + result = new => { result }, + }; + + match result { Ok((is_new, mut ev)) => { if is_new { // A new event has just been added: push it onto the heap @@ -204,14 +209,14 @@ impl Timer { // Start the timer task let c_e = timer.events.clone(); let fut = async move { - let _ = sl_receiver - .recv_async() - .race(timer_task(c_e, ev_receiver)) - .await; + select! { + _ = sl_receiver.recv_async() => {}, + _ = timer_task(c_e, ev_receiver) => {}, + }; tracing::trace!("A - Timer task no longer running..."); }; if spawn_blocking { - task::spawn_blocking(|| task::block_on(fut)); + task::spawn_blocking(|| Handle::current().block_on(fut)); } else { task::spawn(fut); } @@ -234,14 +239,14 @@ impl Timer { // Start the timer task let c_e = self.events.clone(); let fut = async move { - let _ = sl_receiver - .recv_async() - .race(timer_task(c_e, ev_receiver)) - .await; + select! 
{ + _ = sl_receiver.recv_async() => {}, + _ = timer_task(c_e, ev_receiver) => {}, + }; tracing::trace!("A - Timer task no longer running..."); }; if spawn_blocking { - task::spawn_blocking(|| task::block_on(fut)); + task::spawn_blocking(|| Handle::current().block_on(fut)); } else { task::spawn(fut); } @@ -307,8 +312,8 @@ mod tests { time::{Duration, Instant}, }; - use async_std::task; use async_trait::async_trait; + use tokio::{runtime::Runtime, time}; use super::{Timed, TimedEvent, Timer}; @@ -349,7 +354,7 @@ mod tests { timer.add_async(event).await; // Wait for the event to occur - task::sleep(3 * interval).await; + time::sleep(3 * interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -368,7 +373,7 @@ mod tests { handle.defuse(); // Wait for the event to occur - task::sleep(3 * interval).await; + time::sleep(3 * interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -390,7 +395,7 @@ mod tests { timer.add_async(event).await; // Wait for the events to occur - task::sleep(to_elapse + interval).await; + time::sleep(to_elapse + interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -401,7 +406,7 @@ mod tests { handle.defuse(); // Wait a bit more to verify that not more events have been fired - task::sleep(to_elapse).await; + time::sleep(to_elapse).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -416,7 +421,7 @@ mod tests { timer.add_async(event).await; // Wait for the events to occur - task::sleep(to_elapse + interval).await; + time::sleep(to_elapse + interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -426,7 +431,7 @@ mod tests { timer.stop_async().await; // Wait some time - task::sleep(to_elapse).await; + time::sleep(to_elapse).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -436,13 
+441,14 @@ mod tests { timer.start_async(false).await; // Wait for the events to occur - task::sleep(to_elapse).await; + time::sleep(to_elapse).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); assert_eq!(value, amount); } - task::block_on(run()); + let rt = Runtime::new().unwrap(); + rt.block_on(run()); } } diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 6bbd627537..e7b261f292 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -83,7 +83,7 @@ pub fn get_vsock_addr(address: Address<'_>) -> ZResult { } pub struct LinkUnicastVsock { - // The underlying socket as returned from the async-std library + // The underlying socket as returned from the tokio library socket: UnsafeCell, // The source socket address of this link (address used on the local host) src_addr: VsockAddr, diff --git a/plugins/zenoh-backend-example/Cargo.toml b/plugins/zenoh-backend-example/Cargo.toml index df505bd211..9f548e1187 100644 --- a/plugins/zenoh-backend-example/Cargo.toml +++ b/plugins/zenoh-backend-example/Cargo.toml @@ -29,11 +29,11 @@ name = "zenoh_backend_example" crate-type = ["cdylib"] [dependencies] -async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } futures = { workspace = true } git-version = { workspace = true } tracing = { workspace = true } +tokio = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = ["default"] } zenoh-plugin-trait = { workspace = true } diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index bd64fd5024..b9e670b799 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,8 +13,8 @@ // use std::collections::{hash_map::Entry, HashMap}; -use async_std::sync::RwLock; use async_trait::async_trait; +use 
tokio::sync::RwLock; use zenoh::{internal::Value, key_expr::OwnedKeyExpr, prelude::*, time::Timestamp}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index 1a574dd118..766f52d609 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml +++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -27,7 +27,6 @@ description = "Zenoh: traits to be implemented by backends libraries" maintenance = { status = "actively-developed" } [dependencies] -async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } derive_more = { workspace = true } serde_json = { workspace = true } diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index bc52ee5fb2..5341adcf8c 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -34,12 +34,13 @@ name = "zenoh_plugin_example" crate-type = ["cdylib"] [dependencies] -async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } zenoh-util = { workspace = true } futures = { workspace = true } +lazy_static = { workspace = true } git-version = { workspace = true } tracing = { workspace = true } +tokio = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = [ "default", diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index cbd84fb766..b7c494946d 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -17,6 +17,7 @@ use std::{ borrow::Cow, collections::HashMap, convert::TryFrom, + future::Future, sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, Mutex, @@ -39,6 +40,32 @@ use zenoh::{ }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; +const WORKER_THREAD_NUM: usize = 2; +const MAX_BLOCK_THREAD_NUM: usize = 50; 
+lazy_static::lazy_static! { + // The global runtime is used in the dynamic plugins, which we can't get the current runtime + static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(WORKER_THREAD_NUM) + .max_blocking_threads(MAX_BLOCK_THREAD_NUM) + .enable_all() + .build() + .expect("Unable to create runtime"); +} +#[inline(always)] +fn spawn_runtime(task: impl Future + Send + 'static) { + // Check whether able to get the current runtime + match tokio::runtime::Handle::try_current() { + Ok(rt) => { + // Able to get the current runtime (standalone binary), spawn on the current runtime + rt.spawn(task); + } + Err(_) => { + // Unable to get the current runtime (dynamic plugins), spawn on the global runtime + TOKIO_RUNTIME.spawn(task); + } + } +} + // The struct implementing the ZenohPlugin and ZenohPlugin traits pub struct ExamplePlugin {} @@ -78,8 +105,7 @@ impl Plugin for ExamplePlugin { // a flag to end the plugin's loop when the plugin is removed from the config let flag = Arc::new(AtomicBool::new(true)); - // spawn the task running the plugin's loop - async_std::task::spawn(run(runtime.clone(), selector, flag.clone())); + spawn_runtime(run(runtime.clone(), selector, flag.clone())); // return a RunningPlugin to zenohd Ok(Box::new(RunningPlugin(Arc::new(Mutex::new( RunningPluginInner { @@ -122,11 +148,7 @@ impl RunningPluginTrait for RunningPlugin { match KeyExpr::try_from(selector.clone()) { Err(e) => tracing::error!("{}", e), Ok(selector) => { - async_std::task::spawn(run( - guard.runtime.clone(), - selector, - guard.flag.clone(), - )); + spawn_runtime(run(guard.runtime.clone(), selector, guard.flag.clone())); } } return Ok(None); diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 5f36b5bf34..d9a53e9f43 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -33,7 +33,6 @@ crate-type = ["cdylib", "rlib"] [dependencies] 
anyhow = { workspace = true, features = ["default"] } -async-std = { workspace = true, features = ["default", "attributes"] } base64 = { workspace = true } const_format = { workspace = true } flume = { workspace = true } @@ -46,6 +45,7 @@ schemars = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } tide = { workspace = true } +tokio = { workspace = true } zenoh = { workspace = true, features = [ "plugins", "default", diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index e3fae4d285..aefdfd4f86 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -34,7 +34,7 @@ if(typeof(EventSource) !== "undefined") { } "#; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging zenoh::try_init_log_from_env(); @@ -49,7 +49,7 @@ async fn main() { println!("Declaring Queryable on '{key}'..."); let queryable = session.declare_queryable(key).await.unwrap(); - async_std::task::spawn({ + tokio::task::spawn({ let receiver = queryable.handler().clone(); async move { while let Ok(request) = receiver.recv_async().await { @@ -75,7 +75,7 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { publisher.put(value).await.unwrap(); - async_std::task::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } } diff --git a/plugins/zenoh-plugin-rest/src/config.rs b/plugins/zenoh-plugin-rest/src/config.rs index a785eec094..d215b8a5a7 100644 --- a/plugins/zenoh-plugin-rest/src/config.rs +++ b/plugins/zenoh-plugin-rest/src/config.rs @@ -21,12 +21,18 @@ use serde::{ }; const DEFAULT_HTTP_INTERFACE: &str = "[::]"; +pub const DEFAULT_WORK_THREAD_NUM: usize = 2; +pub const DEFAULT_MAX_BLOCK_THREAD_NUM: usize = 50; #[derive(JsonSchema, Deserialize, serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] pub 
struct Config { #[serde(deserialize_with = "deserialize_http_port")] pub http_port: String, + #[serde(default = "default_work_thread_num")] + pub work_thread_num: usize, + #[serde(default = "default_max_block_thread_num")] + pub max_block_thread_num: usize, #[serde(default, deserialize_with = "deserialize_path")] __path__: Option>, __required__: Option, @@ -47,6 +53,14 @@ where deserializer.deserialize_any(HttpPortVisitor) } +fn default_work_thread_num() -> usize { + DEFAULT_WORK_THREAD_NUM +} + +fn default_max_block_thread_num() -> usize { + DEFAULT_MAX_BLOCK_THREAD_NUM +} + struct HttpPortVisitor; impl<'de> Visitor<'de> for HttpPortVisitor { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 8affec9067..eb65a991d6 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -17,14 +17,24 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use std::{borrow::Cow, convert::TryFrom, str::FromStr, sync::Arc}; +use std::{ + borrow::Cow, + convert::TryFrom, + future::Future, + str::FromStr, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; -use async_std::prelude::FutureExt; use base64::Engine; use futures::StreamExt; use http_types::Method; use serde::{Deserialize, Serialize}; use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; +use tokio::time::timeout; use zenoh::{ bytes::{Encoding, ZBytes}, internal::{ @@ -51,6 +61,32 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; +lazy_static::lazy_static! 
{ + static ref WORKER_THREAD_NUM: AtomicUsize = AtomicUsize::new(config::DEFAULT_WORK_THREAD_NUM); + static ref MAX_BLOCK_THREAD_NUM: AtomicUsize = AtomicUsize::new(config::DEFAULT_MAX_BLOCK_THREAD_NUM); + // The global runtime is used in the dynamic plugins, which we can't get the current runtime + static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(WORKER_THREAD_NUM.load(Ordering::SeqCst)) + .max_blocking_threads(MAX_BLOCK_THREAD_NUM.load(Ordering::SeqCst)) + .enable_all() + .build() + .expect("Unable to create runtime"); +} +#[inline(always)] +pub(crate) fn blockon_runtime(task: F) -> F::Output { + // Check whether able to get the current runtime + match tokio::runtime::Handle::try_current() { + Ok(rt) => { + // Able to get the current runtime (standalone binary), use the current runtime + tokio::task::block_in_place(|| rt.block_on(task)) + } + Err(_) => { + // Unable to get the current runtime (dynamic plugins), reuse the global runtime + tokio::task::block_in_place(|| TOKIO_RUNTIME.block_on(task)) + } + } +} + #[derive(Serialize, Deserialize)] struct JSONSample { key: String, @@ -246,8 +282,14 @@ impl Plugin for RestPlugin { let conf: Config = serde_json::from_value(plugin_conf.clone()) .map_err(|e| zerror!("Plugin `{}` configuration error: {}", name, e))?; - let task = async_std::task::spawn(run(runtime.clone(), conf.clone())); - let task = async_std::task::block_on(task.timeout(std::time::Duration::from_millis(1))); + WORKER_THREAD_NUM.store(conf.work_thread_num, Ordering::SeqCst); + MAX_BLOCK_THREAD_NUM.store(conf.max_block_thread_num, Ordering::SeqCst); + + let task = run(runtime.clone(), conf.clone()); + let task = blockon_runtime(async { + timeout(Duration::from_millis(1), TOKIO_RUNTIME.spawn(task)).await + }); + if let Ok(Err(e)) = task { bail!("REST server failed within 1ms: {e}") } @@ -332,12 +374,8 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result, String)>) -> 
tide::Result {} Ok(Err(e)) => { - tracing::debug!( - "SSE error ({})! Unsubscribe and terminate (task {})", - e, - async_std::task::current().id() - ); + tracing::debug!("SSE error ({})! Unsubscribe and terminate", e); if let Err(e) = sub.undeclare().await { tracing::error!("Error undeclaring subscriber: {}", e); } break; } Err(_) => { - tracing::debug!( - "SSE timeout! Unsubscribe and terminate (task {})", - async_std::task::current().id() - ); + tracing::debug!("SSE timeout! Unsubscribe and terminate",); if let Err(e) = sub.undeclare().await { tracing::error!("Error undeclaring subscriber: {}", e); } diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index fa7650fcc2..08367f75c3 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -32,7 +32,6 @@ name = "zenoh_plugin_storage_manager" crate-type = ["cdylib", "rlib"] [dependencies] -async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } crc = { workspace = true } const_format = { workspace = true } @@ -45,6 +44,7 @@ libloading = { workspace = true } tracing = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } +tokio = { workspace = true } urlencoding = { workspace = true } zenoh = { workspace = true, features = [ "default", diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 4043665c5d..fb578b198d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -25,7 +25,6 @@ use std::{ sync::{Arc, Mutex}, }; -use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; use storages_mgt::StorageMessage; @@ -56,6 +55,18 @@ mod memory_backend; mod replica; mod storages_mgt; +const WORKER_THREAD_NUM: usize = 2; +const MAX_BLOCK_THREAD_NUM: usize = 50; 
+lazy_static::lazy_static! { + // The global runtime is used in the zenohd case, which we can't get the current runtime + static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(WORKER_THREAD_NUM) + .max_blocking_threads(MAX_BLOCK_THREAD_NUM) + .enable_all() + .build() + .expect("Unable to create runtime"); +} + #[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(StoragesPlugin); @@ -194,11 +205,13 @@ impl StorageRuntimeInner { let name = name.as_ref(); tracing::info!("Killing volume '{}'", name); if let Some(storages) = self.storages.remove(name) { - async_std::task::block_on(futures::future::join_all( - storages - .into_values() - .map(|s| async move { s.send(StorageMessage::Stop) }), - )); + tokio::task::block_in_place(|| { + TOKIO_RUNTIME.block_on(futures::future::join_all( + storages + .into_values() + .map(|s| async move { s.send(StorageMessage::Stop) }), + )) + }); } self.plugins_manager .started_plugin_mut(name) @@ -266,12 +279,14 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let stopper = async_std::task::block_on(create_and_start_storage( - admin_key, - storage.clone(), - backend.instance(), - self.session.clone(), - ))?; + let stopper = tokio::task::block_in_place(|| { + TOKIO_RUNTIME.block_on(create_and_start_storage( + admin_key, + storage.clone(), + backend.instance(), + self.session.clone(), + )) + })?; self.storages .entry(volume_id) .or_default() @@ -359,10 +374,12 @@ impl RunningPluginTrait for StorageRuntime { for (storage, handle) in storages { with_extended_string(key, &[storage], |key| { if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { - if let Ok(value) = task::block_on(async { - let (tx, rx) = async_std::channel::bounded(1); - let _ = handle.send(StorageMessage::GetStatus(tx)); - rx.recv().await + if let Some(value) = tokio::task::block_in_place(|| { + TOKIO_RUNTIME.block_on(async { + let (tx, mut rx) = tokio::sync::mpsc::channel(1); + 
let _ = handle.send(StorageMessage::GetStatus(tx)); + rx.recv().await + }) }) { responses.push(Response::new(key.clone(), value)) } diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 7c74d9f7f9..b056cf7faf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -13,8 +13,8 @@ // use std::{collections::HashMap, sync::Arc}; -use async_std::sync::RwLock; use async_trait::async_trait; +use tokio::sync::RwLock; use zenoh::{internal::Value, key_expr::OwnedKeyExpr, time::Timestamp, Result as ZResult}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index c11a632e41..737ce79144 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -17,9 +17,9 @@ use std::{ collections::{BTreeSet, HashMap, HashSet}, str, str::FromStr, + sync::Arc, }; -use async_std::sync::Arc; use zenoh::{ internal::Value, key_expr::OwnedKeyExpr, prelude::*, query::Parameters, sample::Sample, time::Timestamp, Session, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 7992053a67..952a72f499 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -16,10 +16,11 @@ use std::{ borrow::Cow, collections::{HashMap, HashSet}, str, + sync::Arc, }; -use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; +use tokio::sync::RwLock; use zenoh::{ internal::Value, key_expr::{KeyExpr, OwnedKeyExpr}, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs 
b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 930b4511a2..ecb8815153 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -17,15 +17,13 @@ use std::{ collections::{HashMap, HashSet}, str, + sync::Arc, time::{Duration, SystemTime}, }; -use async_std::{ - stream::{interval, StreamExt}, - sync::{Arc, RwLock}, -}; use flume::{Receiver, Sender}; use futures::{pin_mut, select, FutureExt}; +use tokio::{sync::RwLock, time::interval}; use zenoh::{key_expr::keyexpr, prelude::*}; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; @@ -277,7 +275,7 @@ impl Replica { // time it takes to publish. let mut interval = interval(self.replica_config.publication_interval); loop { - let _ = interval.next().await; + let _ = interval.tick().await; let digest = snapshotter.get_digest().await; let digest = digest.compress(); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index 190cf6005b..3f00648597 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -14,16 +14,16 @@ use std::{ collections::{HashMap, HashSet}, convert::TryFrom, + sync::Arc, time::Duration, }; -use async_std::{ - stream::{interval, StreamExt}, - sync::{Arc, RwLock}, - task::sleep, -}; use flume::Receiver; use futures::join; +use tokio::{ + sync::RwLock, + time::{interval, sleep}, +}; use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, Session}; use zenoh_backend_traits::config::ReplicaConfig; @@ -126,7 +126,7 @@ impl Snapshotter { let mut interval = interval(self.replica_config.delta); loop { - let _ = interval.next().await; + let _ = interval.tick().await; let mut last_snapshot_time = self.content.last_snapshot_time.write().await; let mut last_interval = self.content.last_interval.write().await; diff --git 
a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index d12b51042c..d3e34f064c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -14,13 +14,14 @@ use std::{ collections::{HashMap, HashSet}, str::{self, FromStr}, + sync::Arc, time::{SystemTime, UNIX_EPOCH}, }; -use async_std::sync::{Arc, Mutex, RwLock}; use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; +use tokio::sync::{Mutex, RwLock}; use zenoh::{ bytes::EncodingBuilderTrait, internal::{ diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 4ca39cb093..fcc8425545 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -11,7 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::Arc; +use std::sync::Arc; + use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::StorageConfig; @@ -19,7 +20,7 @@ pub use super::replica::{Replica, StorageService}; pub enum StorageMessage { Stop, - GetStatus(async_std::channel::Sender), + GetStatus(tokio::sync::mpsc::Sender), } pub(crate) async fn start_storage( @@ -38,7 +39,7 @@ pub(crate) async fn start_storage( let (tx, rx) = flume::bounded(1); - async_std::task::spawn(async move { + tokio::task::spawn(async move { // If a configuration for replica is present, we initialize a replica, else only a storage service // A replica contains a storage service and all metadata required for anti-entropy if config.replica_config.is_some() { diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index d8ada83e4c..483b87e223 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ 
b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -18,7 +18,7 @@ use std::{borrow::Cow, str::FromStr, thread::sleep}; -use async_std::task; +use tokio::runtime::Runtime; use zenoh::{ internal::zasync_executor_init, prelude::*, query::Reply, sample::Sample, time::Timestamp, Config, Session, @@ -51,9 +51,10 @@ async fn get_data(session: &Session, key_expr: &str) -> Vec { } async fn test_updates_in_order() { - task::block_on(async { + async { zasync_executor_init!(); - }); + } + .await; let mut config = Config::default(); config .insert_json5( @@ -148,5 +149,6 @@ async fn test_updates_in_order() { #[test] fn updates_test() { - task::block_on(async { test_updates_in_order().await }); + let rt = Runtime::new().unwrap(); + rt.block_on(async { test_updates_in_order().await }); } diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index d1633a28d4..6a6e36b2fd 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -19,7 +19,7 @@ use std::{borrow::Cow, str::FromStr, thread::sleep}; // use std::collections::HashMap; -use async_std::task; +use tokio::runtime::Runtime; use zenoh::{ internal::zasync_executor_init, prelude::*, query::Reply, sample::Sample, time::Timestamp, Config, Session, @@ -52,9 +52,7 @@ async fn get_data(session: &Session, key_expr: &str) -> Vec { } async fn test_wild_card_in_order() { - task::block_on(async { - zasync_executor_init!(); - }); + zasync_executor_init!(); let mut config = Config::default(); config .insert_json5( @@ -189,12 +187,8 @@ async fn test_wild_card_in_order() { drop(storage); } -// fn test_wild_card_out_of_order() { -// assert_eq!(true, true); -// } - #[test] fn wildcard_test() { - task::block_on(async { test_wild_card_in_order().await }); - // task::block_on(async { test_wild_card_out_of_order() }); + let rt = Runtime::new().unwrap(); + rt.block_on(async { 
test_wild_card_in_order().await }); } From 2d5ab7c48c47c4d957d653b14fc9140549721619 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 6 Aug 2024 10:11:13 +0200 Subject: [PATCH 573/598] Fix routing bugs (#1296) * Fix bug leading to dupplicates * Fix bug leading to duplicate queries * Avoid sending subscribers back to it's source for failover brokering * Fix failover brokering bug reacting to linkstate changes for queries and tokens --- zenoh/src/net/routing/hat/client/pubsub.rs | 70 +++++---- zenoh/src/net/routing/hat/client/queries.rs | 16 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 96 ++++++------ zenoh/src/net/routing/hat/p2p_peer/queries.rs | 48 +++--- zenoh/src/net/routing/hat/router/pubsub.rs | 6 +- zenoh/src/net/routing/hat/router/queries.rs | 147 ++++++++---------- zenoh/src/net/routing/hat/router/token.rs | 134 ++++++++-------- 7 files changed, 256 insertions(+), 261 deletions(-) diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 9d7760247e..4edc9c98e6 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -356,47 +356,49 @@ impl HatPubSubTrait for HatCode { } }; - for face in tables - .faces - .values() - .filter(|f| f.whatami != WhatAmI::Client) - { - if face.local_interests.values().any(|interest| { - interest.finalized - && interest.options.subscribers() - && interest - .res - .as_ref() - .map(|res| { - KeyExpr::try_from(res.expr()) - .and_then(|intres| { - KeyExpr::try_from(expr.full_expr()) - .map(|putres| intres.includes(&putres)) - }) - .unwrap_or(false) - }) - .unwrap_or(true) - }) { - if face_hat!(face).remote_subs.values().any(|sub| { - KeyExpr::try_from(sub.expr()) - .and_then(|subres| { - KeyExpr::try_from(expr.full_expr()) - .map(|putres| subres.intersects(&putres)) - }) - .unwrap_or(false) + if source_type == WhatAmI::Client { + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if 
face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); route.insert( face.id, (face.clone(), key_expr.to_owned(), NodeId::default()), ); } - } else { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.insert( - face.id, - (face.clone(), key_expr.to_owned(), NodeId::default()), - ); } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 8ef3ec1fb7..7658a509da 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -349,13 +349,15 @@ impl HatQueriesTrait for HatCode { } }; - if let Some(face) = tables.faces.values().find(|f| f.whatami != WhatAmI::Client) { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: (face.clone(), key_expr.to_owned(), NodeId::default()), - complete: 0, - distance: f64::MAX, - }); + if source_type == WhatAmI::Client { + if let Some(face) = tables.faces.values().find(|f| f.whatami != WhatAmI::Client) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 
0, + distance: f64::MAX, + }); + } } let res = Resource::get_resource(expr.prefix, expr.suffix); diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index dffdb4e1de..12a1e67186 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -604,62 +604,64 @@ impl HatPubSubTrait for HatCode { } }; - for face in tables - .faces - .values() - .filter(|f| f.whatami == WhatAmI::Router) - { - if face.local_interests.values().any(|interest| { - interest.finalized - && interest.options.subscribers() - && interest - .res - .as_ref() - .map(|res| { - KeyExpr::try_from(res.expr()) - .and_then(|intres| { - KeyExpr::try_from(expr.full_expr()) - .map(|putres| intres.includes(&putres)) - }) - .unwrap_or(false) - }) - .unwrap_or(true) - }) { - if face_hat!(face).remote_subs.values().any(|sub| { - KeyExpr::try_from(sub.expr()) - .and_then(|subres| { - KeyExpr::try_from(expr.full_expr()) - .map(|putres| subres.intersects(&putres)) - }) - .unwrap_or(false) + if source_type == WhatAmI::Client { + for face in tables + .faces + .values() + .filter(|f| f.whatami == WhatAmI::Router) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, 
face.id); route.insert( face.id, (face.clone(), key_expr.to_owned(), NodeId::default()), ); } - } else { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.insert( - face.id, - (face.clone(), key_expr.to_owned(), NodeId::default()), - ); } - } - for face in tables.faces.values().filter(|f| { - f.whatami == WhatAmI::Peer - && !f - .local_interests - .get(&0) - .map(|i| i.finalized) - .unwrap_or(true) - }) { - route.entry(face.id).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), NodeId::default()) - }); + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), NodeId::default()) + }); + } } let res = Resource::get_resource(expr.prefix, expr.suffix); diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 166f63b301..87b6372dae 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -589,30 +589,32 @@ impl HatQueriesTrait for HatCode { } }; - // TODO: BNestMatching: What if there is a local compete ? - if let Some(face) = tables.faces.values().find(|f| f.whatami == WhatAmI::Router) { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: (face.clone(), key_expr.to_owned(), NodeId::default()), - complete: 0, - distance: f64::MAX, - }); - } + if source_type == WhatAmI::Client { + // TODO: BNestMatching: What if there is a local compete ? 
+ if let Some(face) = tables.faces.values().find(|f| f.whatami == WhatAmI::Router) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: f64::MAX, + }); + } - for face in tables.faces.values().filter(|f| { - f.whatami == WhatAmI::Peer - && !f - .local_interests - .get(&0) - .map(|i| i.finalized) - .unwrap_or(true) - }) { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: (face.clone(), key_expr.to_owned(), NodeId::default()), - complete: 0, - distance: 0.5, - }); + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: 0.5, + }); + } } let res = Resource::get_resource(expr.prefix, expr.suffix); diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index eaaf4ff921..506c85888c 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -855,7 +855,7 @@ pub(super) fn pubsub_linkstate_change( && !client_subs && !res.session_ctxs.values().any(|ctx| { ctx.face.whatami == WhatAmI::Peer - && src_face.zid != ctx.face.zid + && src_face.id != ctx.face.id && HatTables::failover_brokering_to(links, ctx.face.zid) }) }) @@ -884,7 +884,9 @@ pub(super) fn pubsub_linkstate_change( } for dst_face in tables.faces.values_mut() { - if HatTables::failover_brokering_to(links, dst_face.zid) { + if src_face.id != dst_face.id + && HatTables::failover_brokering_to(links, dst_face.zid) + { for res in face_hat!(src_face).remote_subs.values() { if 
!face_hat!(dst_face).local_subs.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 4703625fff..d706435179 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -946,86 +946,77 @@ pub(super) fn queries_linkstate_change( links: &[ZenohIdProto], send_declare: &mut SendDeclare, ) { - if let Some(src_face) = tables.get_face(zid) { + if let Some(mut src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in face_hat!(src_face).remote_qabls.values() { - let client_qabls = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); - if !remote_router_qabls(tables, res) && !client_qabls { - for ctx in get_mut_unchecked(&mut res.clone()) + let to_forget = face_hat!(src_face) + .local_qabls + .keys() + .filter(|res| { + let client_qabls = res .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some((id, _)) = face_hat!(dst_face).local_qabls.get(res).cloned() - { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .linkstatepeers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.qabl.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id, - 
ext_wire_expr: WireExprType::null(), - }, - ), - }, - res.expr(), - ), - ); - - face_hat_mut!(dst_face).local_qabls.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let info = local_qabl_info(tables, res, dst_face); - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face) - .local_qabls - .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - ), - ); - } + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); + !remote_router_qabls(tables, res) + && !client_qabls + && !res.session_ctxs.values().any(|ctx| { + ctx.face.whatami == WhatAmI::Peer + && src_face.id != ctx.face.id + && HatTables::failover_brokering_to(links, ctx.face.zid) + }) + }) + .cloned() + .collect::>>(); + for res in to_forget { + if let Some((id, _)) = face_hat_mut!(&mut src_face).local_qabls.remove(&res) { + let wire_expr = Resource::get_best_key(&res, "", src_face.id); + send_declare( + &src_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); + } + } + + for mut dst_face in tables.faces.values().cloned() { + if src_face.id != dst_face.id + && HatTables::failover_brokering_to(links, dst_face.zid) + { + for res in face_hat!(src_face).remote_qabls.values() { + if !face_hat!(dst_face).local_qabls.contains_key(res) { + let id = 
face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + let info = local_qabl_info(tables, res, &dst_face); + face_hat_mut!(&mut dst_face) + .local_qabls + .insert(res.clone(), (id, info)); + let key_expr = Resource::decl_key(res, &mut dst_face); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index e6f18a5ea2..5677901987 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -872,79 +872,73 @@ pub(super) fn token_linkstate_change( links: &[ZenohIdProto], send_declare: &mut SendDeclare, ) { - if let Some(src_face) = tables.get_face(zid).cloned() { + if let Some(mut src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in face_hat!(src_face).remote_tokens.values() { - let client_tokens = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.token); - if !remote_router_tokens(tables, res) && !client_tokens { - for ctx in get_mut_unchecked(&mut res.clone()) + let to_forget = face_hat!(src_face) + .local_tokens + .keys() + .filter(|res| { + let client_tokens = res .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some(id) = face_hat!(dst_face).local_tokens.get(res).cloned() { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .linkstatepeers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - 
res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.token - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareToken(UndeclareToken { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - ), - ); + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.token); + !remote_router_tokens(tables, res) + && !client_tokens + && !res.session_ctxs.values().any(|ctx| { + ctx.face.whatami == WhatAmI::Peer + && src_face.id != ctx.face.id + && HatTables::failover_brokering_to(links, ctx.face.zid) + }) + }) + .cloned() + .collect::>>(); + for res in to_forget { + if let Some(id) = face_hat_mut!(&mut src_face).local_tokens.remove(&res) { + let wire_expr = Resource::get_best_key(&res, "", src_face.id); + send_declare( + &src_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); + } + } - face_hat_mut!(dst_face).local_tokens.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareToken(DeclareToken { - id, - wire_expr: 
key_expr, - }), - }, - res.expr(), - ), - ); - } + for dst_face in tables.faces.values_mut() { + if src_face.id != dst_face.id + && HatTables::failover_brokering_to(links, dst_face.zid) + { + for res in face_hat!(src_face).remote_tokens.values() { + if !face_hat!(dst_face).local_tokens.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); } } } From 931f89f73dfed2dd10609af16e0ed1a380242cef Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 6 Aug 2024 18:18:42 +0200 Subject: [PATCH 574/598] Remove redundant `WhatAmI` tests in `HatBaseTrait::init` (#1299) --- zenoh/src/net/routing/hat/linkstate_peer/mod.rs | 4 ++-- zenoh/src/net/routing/hat/router/mod.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index f861e1bed3..e5303bbc77 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -190,8 +190,8 @@ impl HatBaseTrait for HatCode { WhatAmIMatcher::empty() }; - let peer_full_linkstate = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let peer_full_linkstate = + unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 
cf7d1d14b6..df1729c087 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -320,9 +320,9 @@ impl HatBaseTrait for HatCode { WhatAmIMatcher::empty() }; - let router_full_linkstate = whatami == WhatAmI::Router; - let peer_full_linkstate = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let router_full_linkstate = true; + let peer_full_linkstate = + unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); From 26d05605c28a331dcda50b04386903ab5ce8081d Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Wed, 7 Aug 2024 18:48:23 +0800 Subject: [PATCH 575/598] refactor: polish the z_bytes (de)serialize impl macro (#1300) --- zenoh/src/api/bytes.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 27754b081a..ed9b57f826 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1142,8 +1142,8 @@ impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { } } -// - Integers impl -macro_rules! impl_int { +// - Impl Serialize/Deserialize for numbers +macro_rules! impl_num { ($t:ty) => { impl Serialize<$t> for ZSerde { type Output = ZBytes; @@ -1241,24 +1241,24 @@ macro_rules! 
impl_int { } // Zenoh unsigned integers -impl_int!(u8); -impl_int!(u16); -impl_int!(u32); -impl_int!(u64); -impl_int!(u128); -impl_int!(usize); +impl_num!(u8); +impl_num!(u16); +impl_num!(u32); +impl_num!(u64); +impl_num!(u128); +impl_num!(usize); // Zenoh signed integers -impl_int!(i8); -impl_int!(i16); -impl_int!(i32); -impl_int!(i64); -impl_int!(i128); -impl_int!(isize); +impl_num!(i8); +impl_num!(i16); +impl_num!(i32); +impl_num!(i64); +impl_num!(i128); +impl_num!(isize); // Zenoh floats -impl_int!(f32); -impl_int!(f64); +impl_num!(f32); +impl_num!(f64); // Zenoh bool impl Serialize for ZSerde { From 0bc608836e20c3ab5ad2e479e8b1aa53ba3addd4 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 7 Aug 2024 16:03:51 +0200 Subject: [PATCH 576/598] Add append method to ZByteWriter (#1301) * Add append() to ZBytesWriter * Improve docs * Improve docs * Fix typos * Fix doc tests --- zenoh/src/api/bytes.rs | 66 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index ed9b57f826..7e3083b57f 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -378,7 +378,8 @@ impl ZBytesReader<'_> { self.remaining() == 0 } - /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// Deserialize an object of type `T` from a [`ZBytesReader`] using the [`ZSerde`]. + /// See [`ZBytesWriter::serialize`] for an example. pub fn deserialize(&mut self) -> Result>::Error> where for<'a> ZSerde: Deserialize = &'a ZBytes>, @@ -419,6 +420,32 @@ impl ZBytesWriter<'_> { unsafe { codec.write(&mut self.0, bytes).unwrap_unchecked() }; } + /// Serialize a type `T` on the [`ZBytes`]. For simmetricity, every serialization + /// operation preserves type boundaries by preprending the length of the serialized data. + /// This allows calling [`ZBytesReader::deserialize`] in the same order to retrieve the original type. 
+ /// + /// Example: + /// ``` + /// use zenoh::bytes::ZBytes; + /// + /// // serialization + /// let mut bytes = ZBytes::empty(); + /// let mut writer = bytes.writer(); + /// let i1 = 1234_u32; + /// let i2 = String::from("test"); + /// let i3 = vec![1, 2, 3, 4]; + /// writer.serialize(i1); + /// writer.serialize(&i2); + /// writer.serialize(&i3); + /// // deserialization + /// let mut reader = bytes.reader(); + /// let o1: u32 = reader.deserialize().unwrap(); + /// let o2: String = reader.deserialize().unwrap(); + /// let o3: Vec = reader.deserialize().unwrap(); + /// assert_eq!(i1, o1); + /// assert_eq!(i2, o2); + /// assert_eq!(i3, o3); + /// ``` pub fn serialize(&mut self, t: T) where ZSerde: Serialize, @@ -427,6 +454,8 @@ impl ZBytesWriter<'_> { self.write(&tpld.0); } + /// Try to serialize a type `T` on the [`ZBytes`]. Serialization works + /// in the same way as [`ZBytesWriter::serialize`]. pub fn try_serialize(&mut self, t: T) -> Result<(), E> where ZSerde: Serialize>, @@ -435,6 +464,41 @@ impl ZBytesWriter<'_> { self.write(&tpld.0); Ok(()) } + + /// Append a [`ZBytes`] to this [`ZBytes`] by taking ownership. + /// This allows to compose a [`ZBytes`] out of multiple [`ZBytes`] that may point to different memory regions. + /// Said in other terms, it allows to create a linear view on different memory regions without copy. + /// Please note that `append` does not preserve any boundaries as done in [`ZBytesWriter::serialize`], meaning + /// that [`ZBytesReader::deserialize`] will not be able to deserialize the types in the same seriliazation order. + /// You will need to decide how to deserialize data yourself. 
+ /// + /// Example: + /// ``` + /// use zenoh::bytes::ZBytes; + /// + /// let one = ZBytes::from(vec![0, 1]); + /// let two = ZBytes::from(vec![2, 3, 4, 5]); + /// let three = ZBytes::from(vec![6, 7]); + /// + /// let mut bytes = ZBytes::empty(); + /// let mut writer = bytes.writer(); + /// // Append data without copying by passing ownership + /// writer.append(one); + /// writer.append(two); + /// writer.append(three); + /// + /// // deserialization + /// let mut out: Vec = bytes.into(); + /// assert_eq!(out, vec![0u8, 1, 2, 3, 4, 5, 6, 7]); + /// ``` + pub fn append(&mut self, b: ZBytes) { + use zenoh_buffers::writer::Writer; + for s in b.0.zslices() { + // SAFETY: we are writing a ZSlice on a ZBuf, this is infallible because we are just pushing a ZSlice to + // the list of available ZSlices. + unsafe { self.0.write_zslice(s).unwrap_unchecked() } + } + } } impl std::io::Write for ZBytesWriter<'_> { From 764be602d3b577d359b2255e831a8896f1078d14 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 9 Aug 2024 16:29:31 +0200 Subject: [PATCH 577/598] Replace trees computation tasks with a worker (#1303) * Replace trees computation tasks with a worker * Address review comments * Remove review comments --- .../src/net/routing/hat/linkstate_peer/mod.rs | 80 ++++++------ zenoh/src/net/routing/hat/router/mod.rs | 115 +++++++++--------- 2 files changed, 96 insertions(+), 99 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index e5303bbc77..f9e1674c3e 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -21,7 +21,6 @@ use std::{ any::Any, collections::{HashMap, HashSet}, sync::{atomic::AtomicU32, Arc}, - time::Duration, }; use token::{token_remove_node, undeclare_simple_token}; @@ -116,42 +115,21 @@ macro_rules! 
face_hat_mut { } use face_hat_mut; -struct HatTables { - linkstatepeer_subs: HashSet>, - linkstatepeer_tokens: HashSet>, - linkstatepeer_qabls: HashSet>, - linkstatepeers_net: Option, - linkstatepeers_trees_task: Option, +struct TreesComputationWorker { + _task: TerminatableTask, + tx: flume::Sender>, } -impl Drop for HatTables { - fn drop(&mut self) { - if let Some(mut task) = self.linkstatepeers_trees_task.take() { - task.terminate(Duration::from_secs(10)); - } - } -} - -impl HatTables { +impl TreesComputationWorker { fn new() -> Self { - Self { - linkstatepeer_subs: HashSet::new(), - linkstatepeer_tokens: HashSet::new(), - linkstatepeer_qabls: HashSet::new(), - linkstatepeers_net: None, - linkstatepeers_trees_task: None, - } - } - - fn schedule_compute_trees(&mut self, tables_ref: Arc) { - if self.linkstatepeers_trees_task.is_none() { - let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Net, - async move { - tokio::time::sleep(std::time::Duration::from_millis( - *TREES_COMPUTATION_DELAY_MS, - )) - .await; + let (tx, rx) = flume::bounded::>(1); + let task = TerminatableTask::spawn_abortable(zenoh_runtime::ZRuntime::Net, async move { + loop { + tokio::time::sleep(std::time::Duration::from_millis( + *TREES_COMPUTATION_DELAY_MS, + )) + .await; + if let Ok(tables_ref) = rx.recv_async().await { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); @@ -165,15 +143,37 @@ impl HatTables { pubsub::pubsub_tree_change(&mut tables, &new_children); queries::queries_tree_change(&mut tables, &new_children); token::token_tree_change(&mut tables, &new_children); + drop(tables); + } + } + }); + Self { _task: task, tx } + } +} - tracing::trace!("Computations completed"); - hat_mut!(tables).linkstatepeers_trees_task = None; - }, - TerminatableTask::create_cancellation_token(), - ); - self.linkstatepeers_trees_task = Some(task); +struct HatTables { + linkstatepeer_subs: HashSet>, + linkstatepeer_tokens: HashSet>, + linkstatepeer_qabls: HashSet>, + 
linkstatepeers_net: Option, + linkstatepeers_trees_worker: TreesComputationWorker, +} + +impl HatTables { + fn new() -> Self { + Self { + linkstatepeer_subs: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), + linkstatepeer_qabls: HashSet::new(), + linkstatepeers_net: None, + linkstatepeers_trees_worker: TreesComputationWorker::new(), } } + + fn schedule_compute_trees(&mut self, tables_ref: Arc) { + tracing::trace!("Schedule trees computation"); + let _ = self.linkstatepeers_trees_worker.tx.try_send(tables_ref); + } } pub(crate) struct HatCode {} diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index df1729c087..a2d3c66aa3 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -22,7 +22,6 @@ use std::{ collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, sync::{atomic::AtomicU32, Arc}, - time::Duration, }; use token::{token_linkstate_change, token_remove_node, undeclare_simple_token}; @@ -117,6 +116,49 @@ macro_rules! 
face_hat_mut { } use face_hat_mut; +struct TreesComputationWorker { + _task: TerminatableTask, + tx: flume::Sender>, +} + +impl TreesComputationWorker { + fn new(net_type: WhatAmI) -> Self { + let (tx, rx) = flume::bounded::>(1); + let task = TerminatableTask::spawn_abortable(zenoh_runtime::ZRuntime::Net, async move { + loop { + tokio::time::sleep(std::time::Duration::from_millis( + *TREES_COMPUTATION_DELAY_MS, + )) + .await; + if let Ok(tables_ref) = rx.recv_async().await { + let mut tables = zwrite!(tables_ref.tables); + + tracing::trace!("Compute trees"); + let new_children = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables) + .linkstatepeers_net + .as_mut() + .unwrap() + .compute_trees(), + }; + + tracing::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); + queries::queries_tree_change(&mut tables, &new_children, net_type); + token::token_tree_change(&mut tables, &new_children, net_type); + drop(tables); + } + } + }); + Self { _task: task, tx } + } +} + struct HatTables { router_subs: HashSet>, linkstatepeer_subs: HashSet>, @@ -127,22 +169,11 @@ struct HatTables { routers_net: Option, linkstatepeers_net: Option, shared_nodes: Vec, - routers_trees_task: Option, - linkstatepeers_trees_task: Option, + routers_trees_worker: TreesComputationWorker, + linkstatepeers_trees_worker: TreesComputationWorker, router_peers_failover_brokering: bool, } -impl Drop for HatTables { - fn drop(&mut self) { - if let Some(mut task) = self.linkstatepeers_trees_task.take() { - task.terminate(Duration::from_secs(10)); - } - if let Some(mut task) = self.routers_trees_task.take() { - task.terminate(Duration::from_secs(10)); - } - } -} - impl HatTables { fn new(router_peers_failover_brokering: bool) -> Self { Self { @@ -155,8 +186,8 @@ impl HatTables { routers_net: None, linkstatepeers_net: None, shared_nodes: vec![], - routers_trees_task: None, - 
linkstatepeers_trees_task: None, + routers_trees_worker: TreesComputationWorker::new(WhatAmI::Router), + linkstatepeers_trees_worker: TreesComputationWorker::new(WhatAmI::Peer), router_peers_failover_brokering, } } @@ -259,49 +290,15 @@ impl HatTables { } fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.linkstatepeers_trees_task.is_none()) - { - let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Net, - async move { - tokio::time::sleep(std::time::Duration::from_millis( - *TREES_COMPUTATION_DELAY_MS, - )) - .await; - let mut tables = zwrite!(tables_ref.tables); - - tracing::trace!("Compute trees"); - let new_children = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables) - .linkstatepeers_net - .as_mut() - .unwrap() - .compute_trees(), - }; - - tracing::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); - queries::queries_tree_change(&mut tables, &new_children, net_type); - token::token_tree_change(&mut tables, &new_children, net_type); - - tracing::trace!("Computations completed"); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).linkstatepeers_trees_task = None, - }; - }, - TerminatableTask::create_cancellation_token(), - ); - match net_type { - WhatAmI::Router => self.routers_trees_task = Some(task), - _ => self.linkstatepeers_trees_task = Some(task), - }; + tracing::trace!("Schedule trees computation"); + match net_type { + WhatAmI::Router => { + let _ = self.routers_trees_worker.tx.try_send(tables_ref); + } + WhatAmI::Peer => { + let _ = self.linkstatepeers_trees_worker.tx.try_send(tables_ref); + } + _ => (), } } } From 5b628ac3ca8d4d2d52569ed93c6c2c49239557af Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 12 Aug 2024 
19:23:56 +0200 Subject: [PATCH 578/598] Avoid warning when failing to connect to an already connected peer (#1309) --- zenoh/src/net/runtime/orchestrator.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 0bd2b8ef33..fca109fc24 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -964,11 +964,18 @@ impl Runtime { } } - tracing::warn!( - "Unable to connect to any locator of scouted peer {}: {:?}", - zid, - locators - ); + if self.manager().get_transport_unicast(zid).await.is_none() { + tracing::warn!( + "Unable to connect to any locator of scouted peer {}: {:?}", + zid, + locators + ); + } else { + tracing::trace!( + "Unable to connect to any locator of scouted peer {}: Already connected!", + zid + ); + } false } From 0e2f78a7cfe4e6f9aa7056cfdd58607e1ddcbd9f Mon Sep 17 00:00:00 2001 From: yellowhatter <104833606+yellowhatter@users.noreply.github.com> Date: Wed, 14 Aug 2024 17:27:18 +0300 Subject: [PATCH 579/598] make SHM enabled by default in Config (#1312) * make SHM enabled by default in Config * [skip ci] add SHM description in the config file * review fixes: assert -> unwrap --- DEFAULT_CONFIG.json5 | 9 +++++++-- commons/zenoh-config/src/defaults.rs | 2 +- examples/examples/z_get_shm.rs | 7 +------ examples/examples/z_ping_shm.rs | 7 +------ examples/examples/z_pong.rs | 7 +------ examples/examples/z_pub_shm.rs | 7 +------ examples/examples/z_pub_shm_thr.rs | 7 +------ examples/examples/z_queryable.rs | 7 +------ examples/examples/z_queryable_shm.rs | 7 +------ examples/examples/z_sub.rs | 7 +------ examples/examples/z_sub_shm.rs | 7 +------ examples/examples/z_sub_thr.rs | 7 +------ zenoh/tests/shm.rs | 8 +++----- 13 files changed, 21 insertions(+), 68 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 893a1930a5..27af64ef93 100644 --- a/DEFAULT_CONFIG.json5 +++ 
b/DEFAULT_CONFIG.json5 @@ -421,9 +421,14 @@ server_name_verification: null, }, }, - /// Shared memory configuration + /// Shared memory configuration. + /// NOTE: shared memory can be used only if zenoh is compiled with "shared-memory" feature, otherwise + /// settings in this section have no effect. shared_memory: { - enabled: false, + /// A probing procedure for shared memory is performed upon session opening. To enable zenoh to operate + /// over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + /// subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + enabled: true, }, auth: { /// The configuration of authentication. diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 810e0931e2..c6e69dd148 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -247,7 +247,7 @@ impl Default for LinkRxConf { #[allow(clippy::derivable_impls)] impl Default for ShmConf { fn default() -> Self { - Self { enabled: false } + Self { enabled: true } } } diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index b40834afc4..b3c2dedd6e 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -31,12 +31,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, selector, mut payload, target, timeout) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
- config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, selector, mut payload, target, timeout) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 4c6f8fed01..de33819818 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -28,12 +28,7 @@ fn main() { // Initiate logging zenoh::try_init_log_from_env(); - let (mut config, warmup, size, n) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_ping_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, warmup, size, n) = parse_args(); let session = zenoh::open(config).wait().unwrap(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index ef022d234c..86b31d41f3 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -19,12 +19,7 @@ fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, express) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_ping_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
- config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, express) = parse_args(); let session = zenoh::open(config).wait().unwrap().into_arc(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 5a3ca9590f..6731ae8d0d 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -29,12 +29,7 @@ async fn main() -> Result<(), ZError> { // Initiate logging zenoh::try_init_log_from_env(); - let (mut config, path, payload) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, path, payload) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 4641c51c95..17bd6de804 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -25,12 +25,7 @@ use zenoh_examples::CommonArgs; async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, sm_size, size) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
- config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, sm_size, size) = parse_args(); let z = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index d6c5c7ea46..4b950a0a33 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -20,12 +20,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, key_expr, payload, complete) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, key_expr, payload, complete) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index b0b443d313..e92efbdc38 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -31,12 +31,7 @@ async fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, key_expr, payload, complete) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
- config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, key_expr, payload, complete) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 690a211119..7f3a93c5fb 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -20,12 +20,7 @@ async fn main() { // Initiate logging zenoh::try_init_log_from_env(); - let (mut config, key_expr) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, key_expr) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 6f9bb3f070..f45dab099d 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -22,12 +22,7 @@ async fn main() { // Initiate logging zenoh::try_init_log_from_env(); - let (mut config, key_expr) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
- config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, key_expr) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 18514b3ba8..fa78b4688a 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -71,12 +71,7 @@ fn main() { // initiate logging zenoh::try_init_log_from_env(); - let (mut config, m, n) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, m, n) = parse_args(); let session = zenoh::open(config).wait().unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index e47de65812..0febac2eaa 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -26,7 +26,8 @@ use zenoh::{ pubsub::Reliability, qos::CongestionControl, shm::{ - BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, + zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, + POSIX_PROTOCOL_ID, }, Session, }; @@ -52,7 +53,6 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { ) .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); @@ -68,7 +68,6 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { ) .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] 
Opening peer02 session: {:?}", endpoints); let peer02 = ztimeout!(zenoh::open(config)).unwrap(); @@ -84,7 +83,6 @@ async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, .set(vec![endpoint01.parse().unwrap()]) .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); let peer01 = ztimeout!(zenoh::open(config)).unwrap(); @@ -95,7 +93,6 @@ async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, .set(vec![endpoint02.parse().unwrap()]) .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); - config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); let peer02 = ztimeout!(zenoh::open(config)).unwrap(); @@ -128,6 +125,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re .declare_subscriber(&key_expr) .callback(move |sample| { assert_eq!(sample.payload().len(), size); + let _ = sample.payload().deserialize::<&zshm>().unwrap(); c_msgs.fetch_add(1, Ordering::Relaxed); })) .unwrap(); From 07ddce1c524301cabc0584c10b60bfcc702dfc03 Mon Sep 17 00:00:00 2001 From: zettascale-bot <161707711+zettascale-bot@users.noreply.github.com> Date: Tue, 20 Aug 2024 10:20:54 +0200 Subject: [PATCH 580/598] Bump dev version to `1.0.0-dev` (#1311) * chore: Bump version to `1.0.0-dev` * chore: Bump /zenoh.*/ dependencies to `1.0.0-dev` * chore: Update Cargo lockfile --------- Co-authored-by: eclipse-zenoh-bot --- Cargo.lock | 76 +++++++++---------- Cargo.toml | 64 ++++++++-------- plugins/zenoh-plugin-rest/Cargo.toml | 2 +- .../zenoh-plugin-storage-manager/Cargo.toml | 2 +- zenoh/Cargo.toml | 2 +- 5 files changed, 73 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc465ce342..1de420de99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5453,7 +5453,7 @@ dependencies = [ [[package]] 
name = "zenoh" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "ahash", "async-trait", @@ -5510,7 +5510,7 @@ dependencies = [ [[package]] name = "zenoh-backend-example" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "const_format", @@ -5526,7 +5526,7 @@ dependencies = [ [[package]] name = "zenoh-buffers" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "rand 0.8.5", "zenoh-collections", @@ -5534,7 +5534,7 @@ dependencies = [ [[package]] name = "zenoh-codec" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "criterion", "rand 0.8.5", @@ -5549,14 +5549,14 @@ dependencies = [ [[package]] name = "zenoh-collections" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "rand 0.8.5", ] [[package]] name = "zenoh-config" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "flume", "json5", @@ -5577,7 +5577,7 @@ dependencies = [ [[package]] name = "zenoh-core" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-global-executor", "lazy_static", @@ -5588,7 +5588,7 @@ dependencies = [ [[package]] name = "zenoh-crypto" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "aes 0.8.3", "hmac 0.12.1", @@ -5600,7 +5600,7 @@ dependencies = [ [[package]] name = "zenoh-examples" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "clap", "flume", @@ -5623,7 +5623,7 @@ dependencies = [ [[package]] name = "zenoh-ext" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "bincode", "flume", @@ -5641,7 +5641,7 @@ dependencies = [ [[package]] name = "zenoh-ext-examples" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "bincode", "clap", @@ -5657,7 +5657,7 @@ dependencies = [ [[package]] name = "zenoh-keyexpr" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "ahash", "criterion", @@ -5673,7 +5673,7 @@ dependencies = [ [[package]] name = "zenoh-link" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ 
"async-trait", "rcgen", @@ -5694,7 +5694,7 @@ dependencies = [ [[package]] name = "zenoh-link-commons" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "base64 0.22.1", @@ -5719,7 +5719,7 @@ dependencies = [ [[package]] name = "zenoh-link-quic" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "base64 0.22.1", @@ -5749,7 +5749,7 @@ dependencies = [ [[package]] name = "zenoh-link-serial" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "futures", @@ -5770,7 +5770,7 @@ dependencies = [ [[package]] name = "zenoh-link-tcp" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "socket2 0.5.6", @@ -5788,7 +5788,7 @@ dependencies = [ [[package]] name = "zenoh-link-tls" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "base64 0.22.1", @@ -5818,7 +5818,7 @@ dependencies = [ [[package]] name = "zenoh-link-udp" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "socket2 0.5.6", @@ -5838,7 +5838,7 @@ dependencies = [ [[package]] name = "zenoh-link-unixpipe" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "advisory-lock", "async-trait", @@ -5861,7 +5861,7 @@ dependencies = [ [[package]] name = "zenoh-link-unixsock_stream" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "futures", @@ -5880,7 +5880,7 @@ dependencies = [ [[package]] name = "zenoh-link-vsock" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "libc", @@ -5899,7 +5899,7 @@ dependencies = [ [[package]] name = "zenoh-link-ws" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "futures-util", @@ -5919,7 +5919,7 @@ dependencies = [ [[package]] name = "zenoh-macros" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "proc-macro2", "quote", @@ -5929,7 +5929,7 @@ dependencies = [ [[package]] name = "zenoh-plugin-example" -version = 
"0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "const_format", "futures", @@ -5945,7 +5945,7 @@ dependencies = [ [[package]] name = "zenoh-plugin-rest" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "anyhow", "base64 0.22.1", @@ -5970,7 +5970,7 @@ dependencies = [ [[package]] name = "zenoh-plugin-storage-manager" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-global-executor", "async-trait", @@ -5998,7 +5998,7 @@ dependencies = [ [[package]] name = "zenoh-plugin-trait" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "libloading", "serde", @@ -6012,7 +6012,7 @@ dependencies = [ [[package]] name = "zenoh-protocol" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "const_format", "lazy_static", @@ -6027,14 +6027,14 @@ dependencies = [ [[package]] name = "zenoh-result" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "anyhow", ] [[package]] name = "zenoh-runtime" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "futures", "lazy_static", @@ -6048,7 +6048,7 @@ dependencies = [ [[package]] name = "zenoh-shm" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "bincode", @@ -6073,7 +6073,7 @@ dependencies = [ [[package]] name = "zenoh-sync" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "event-listener 5.3.1", "futures", @@ -6087,7 +6087,7 @@ dependencies = [ [[package]] name = "zenoh-task" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "futures", "tokio", @@ -6099,7 +6099,7 @@ dependencies = [ [[package]] name = "zenoh-transport" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "flume", @@ -6134,7 +6134,7 @@ dependencies = [ [[package]] name = "zenoh-util" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "const_format", @@ -6158,7 +6158,7 @@ dependencies = [ [[package]] name = "zenoh_backend_traits" -version = "0.11.0-dev" +version = "1.0.0-dev" 
dependencies = [ "async-trait", "const_format", @@ -6174,7 +6174,7 @@ dependencies = [ [[package]] name = "zenohd" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "clap", "futures", diff --git a/Cargo.toml b/Cargo.toml index 034d059862..e2aac0cb40 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,7 +57,7 @@ exclude = ["ci/nostd-check", "ci/valgrind-check"] [workspace.package] rust-version = "1.75.0" -version = "0.11.0-dev" # Zenoh version +version = "1.0.0-dev" repository = "https://github.com/eclipse-zenoh/zenoh" homepage = "http://zenoh.io" authors = [ @@ -188,37 +188,37 @@ winapi = { version = "0.3.9", features = ["iphlpapi"] } x509-parser = "0.16.0" z-serial = "0.2.3" either = "1.13.0" -zenoh-ext = { version = "0.11.0-dev", path = "zenoh-ext" } -zenoh-shm = { version = "0.11.0-dev", path = "commons/zenoh-shm" } -zenoh-result = { version = "0.11.0-dev", path = "commons/zenoh-result", default-features = false } -zenoh-config = { version = "0.11.0-dev", path = "commons/zenoh-config" } -zenoh-protocol = { version = "0.11.0-dev", path = "commons/zenoh-protocol", default-features = false } -zenoh-keyexpr = { version = "0.11.0-dev", path = "commons/zenoh-keyexpr", default-features = false } -zenoh-core = { version = "0.11.0-dev", path = "commons/zenoh-core" } -zenoh-buffers = { version = "0.11.0-dev", path = "commons/zenoh-buffers", default-features = false } -zenoh-util = { version = "0.11.0-dev", path = "commons/zenoh-util" } -zenoh-crypto = { version = "0.11.0-dev", path = "commons/zenoh-crypto" } -zenoh-codec = { version = "0.11.0-dev", path = "commons/zenoh-codec" } -zenoh-sync = { version = "0.11.0-dev", path = "commons/zenoh-sync" } -zenoh-collections = { version = "0.11.0-dev", path = "commons/zenoh-collections", default-features = false } -zenoh-macros = { version = "0.11.0-dev", path = "commons/zenoh-macros" } -zenoh-plugin-trait = { version = "0.11.0-dev", path = "plugins/zenoh-plugin-trait", default-features = false } -zenoh_backend_traits = 
{ version = "0.11.0-dev", path = "plugins/zenoh-backend-traits" } -zenoh-transport = { version = "0.11.0-dev", path = "io/zenoh-transport", default-features = false } -zenoh-link-tls = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-tls" } -zenoh-link-tcp = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-tcp" } -zenoh-link-unixsock_stream = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-unixsock_stream" } -zenoh-link-quic = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-quic" } -zenoh-link-udp = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-udp" } -zenoh-link-ws = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-ws" } -zenoh-link-unixpipe = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-unixpipe" } -zenoh-link-serial = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-serial" } -zenoh-link-vsock = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-vsock" } -zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } -zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } -zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } -zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } -zenoh-task = { version = "0.11.0-dev", path = "commons/zenoh-task" } +zenoh-ext = { version = "1.0.0-dev", path = "zenoh-ext" } +zenoh-shm = { version = "1.0.0-dev", path = "commons/zenoh-shm" } +zenoh-result = { version = "1.0.0-dev", path = "commons/zenoh-result", default-features = false } +zenoh-config = { version = "1.0.0-dev", path = "commons/zenoh-config" } +zenoh-protocol = { version = "1.0.0-dev", path = "commons/zenoh-protocol", default-features = false } +zenoh-keyexpr = { version = "1.0.0-dev", path = "commons/zenoh-keyexpr", default-features = false } +zenoh-core = { version = "1.0.0-dev", path = "commons/zenoh-core" } +zenoh-buffers = { version = "1.0.0-dev", path = "commons/zenoh-buffers", 
default-features = false } +zenoh-util = { version = "1.0.0-dev", path = "commons/zenoh-util" } +zenoh-crypto = { version = "1.0.0-dev", path = "commons/zenoh-crypto" } +zenoh-codec = { version = "1.0.0-dev", path = "commons/zenoh-codec" } +zenoh-sync = { version = "1.0.0-dev", path = "commons/zenoh-sync" } +zenoh-collections = { version = "1.0.0-dev", path = "commons/zenoh-collections", default-features = false } +zenoh-macros = { version = "1.0.0-dev", path = "commons/zenoh-macros" } +zenoh-plugin-trait = { version = "1.0.0-dev", path = "plugins/zenoh-plugin-trait", default-features = false } +zenoh_backend_traits = { version = "1.0.0-dev", path = "plugins/zenoh-backend-traits" } +zenoh-transport = { version = "1.0.0-dev", path = "io/zenoh-transport", default-features = false } +zenoh-link-tls = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-tls" } +zenoh-link-tcp = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-tcp" } +zenoh-link-unixsock_stream = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-unixsock_stream" } +zenoh-link-quic = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-quic" } +zenoh-link-udp = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-udp" } +zenoh-link-ws = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-ws" } +zenoh-link-unixpipe = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-unixpipe" } +zenoh-link-serial = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-serial" } +zenoh-link-vsock = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-vsock" } +zenoh-link = { version = "1.0.0-dev", path = "io/zenoh-link" } +zenoh-link-commons = { version = "1.0.0-dev", path = "io/zenoh-link-commons" } +zenoh = { version = "1.0.0-dev", path = "zenoh", default-features = false } +zenoh-runtime = { version = "1.0.0-dev", path = "commons/zenoh-runtime" } +zenoh-task = { version = "1.0.0-dev", path = "commons/zenoh-task" } [profile.dev] debug = true diff --git 
a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index d9a53e9f43..ba35c43c86 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -74,4 +74,4 @@ maintainer = "zenoh-dev@eclipse.org" copyright = "2024 ZettaScale Technology" section = "net" license-file = ["../../LICENSE", "0"] -depends = "zenohd (=0.11.0-dev-1)" +depends = "zenohd (=1.0.0~dev-1)" diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 08367f75c3..27458af929 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -73,4 +73,4 @@ maintainer = "zenoh-dev@eclipse.org" copyright = "2024 ZettaScale Technology" section = "net" license-file = ["../../LICENSE", "0"] -depends = "zenohd (=0.11.0-dev-1)" +depends = "zenohd (=1.0.0~dev-1)" diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 968acac805..7bdb393a6c 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -138,7 +138,7 @@ maintainer = "zenoh-dev@eclipse.org" copyright = "2024 ZettaScale Technology" section = "net" license-file = ["../LICENSE", "0"] -depends = "zenohd (=0.11.0-dev-1), zenoh-plugin-rest (=0.11.0-dev-1), zenoh-plugin-storage-manager (=0.11.0-dev-1)" +depends = "zenohd (=1.0.0~dev-1), zenoh-plugin-rest (=1.0.0~dev-1), zenoh-plugin-storage-manager (=1.0.0~dev-1)" maintainer-scripts = ".deb" assets = [["../README.md", "README.md", "644"]] From efc31486943d20e99214289741040ed90f72e9ba Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 20 Aug 2024 11:19:48 +0200 Subject: [PATCH 581/598] Hide `UndeclarableInner::undeclare_inner` (#1294) * Hide `UndeclarableInner::undeclare_inner` * Rename `UndeclarableInner` to `UndeclareSealed` --- zenoh/src/api/key_expr.rs | 4 ++-- zenoh/src/api/liveliness.rs | 6 +++--- zenoh/src/api/publisher.rs | 14 +++++++------- zenoh/src/api/queryable.rs | 10 +++++----- zenoh/src/api/session.rs | 26 
+++++++++++++++++++++----- zenoh/src/api/subscriber.rs | 10 +++++----- 6 files changed, 43 insertions(+), 27 deletions(-) diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index dbd44a85c6..fc472e0db3 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -25,7 +25,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use super::session::{Session, Undeclarable}; +use super::session::{Session, UndeclarableSealed}; use crate::net::primitives::Primitives; #[derive(Clone, Debug)] @@ -549,7 +549,7 @@ impl<'a> KeyExpr<'a> { } } -impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { +impl<'a> UndeclarableSealed<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { fn undeclare_inner(self, session: &'a Session) -> KeyExprUndeclaration<'a> { KeyExprUndeclaration { session, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 038a4b8eab..64f87c6de5 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -27,7 +27,7 @@ use super::{ key_expr::KeyExpr, query::Reply, sample::{Locality, Sample}, - session::{Session, SessionRef, Undeclarable}, + session::{Session, SessionRef, UndeclarableSealed}, subscriber::{Subscriber, SubscriberInner}, Id, }; @@ -386,7 +386,7 @@ impl<'a> LivelinessToken<'a> { /// ``` #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } /// Keep this liveliness token in background, until the session is closed. 
@@ -401,7 +401,7 @@ impl<'a> LivelinessToken<'a> { } #[zenoh_macros::unstable] -impl<'a> Undeclarable<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> { +impl<'a> UndeclarableSealed<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> { fn undeclare_inner(self, _: ()) -> LivelinessTokenUndeclaration<'a> { LivelinessTokenUndeclaration { token: self } } diff --git a/zenoh/src/api/publisher.rs b/zenoh/src/api/publisher.rs index f4b969b18f..c8e0ace03e 100644 --- a/zenoh/src/api/publisher.rs +++ b/zenoh/src/api/publisher.rs @@ -51,7 +51,7 @@ use super::{ encoding::Encoding, key_expr::KeyExpr, sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, - session::{SessionRef, Undeclarable}, + session::{SessionRef, UndeclarableSealed}, }; use crate::{ api::{subscriber::SubscriberKind, Id}, @@ -361,7 +361,7 @@ impl<'a> Publisher<'a> { /// # } /// ``` pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } #[cfg(feature = "unstable")] @@ -462,7 +462,7 @@ impl PublisherDeclarations for std::sync::Arc> { } } -impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { +impl<'a> UndeclarableSealed<(), PublisherUndeclaration<'a>> for Publisher<'a> { fn undeclare_inner(self, _: ()) -> PublisherUndeclaration<'a> { PublisherUndeclaration { publisher: self } } @@ -974,12 +974,12 @@ pub(crate) struct MatchingListenerInner<'a> { impl<'a> MatchingListenerInner<'a> { #[inline] pub fn undeclare(self) -> MatchingListenerUndeclaration<'a> { - Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } } #[zenoh_macros::unstable] -impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { +impl<'a> UndeclarableSealed<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { MatchingListenerUndeclaration { subscriber: 
self } } @@ -1046,9 +1046,9 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { } #[zenoh_macros::unstable] -impl<'a, T> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListener<'a, T> { +impl<'a, T> UndeclarableSealed<(), MatchingListenerUndeclaration<'a>> for MatchingListener<'a, T> { fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { - Undeclarable::undeclare_inner(self.listener, ()) + UndeclarableSealed::undeclare_inner(self.listener, ()) } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 566a903bd1..61ae0093ea 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -47,7 +47,7 @@ use super::{ publisher::Priority, sample::{Locality, QoSBuilder, Sample, SampleKind}, selector::Selector, - session::{SessionRef, Undeclarable}, + session::{SessionRef, UndeclarableSealed}, value::Value, Id, }; @@ -567,7 +567,7 @@ pub(crate) struct CallbackQueryable<'a> { undeclare_on_drop: bool, } -impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> { +impl<'a> UndeclarableSealed<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> { fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> { QueryableUndeclaration { queryable: self } } @@ -848,7 +848,7 @@ impl<'a, Handler> Queryable<'a, Handler> { #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } /// Make the queryable run in background, until the session is closed. 
@@ -862,9 +862,9 @@ impl<'a, Handler> Queryable<'a, Handler> { } } -impl<'a, T> Undeclarable<(), QueryableUndeclaration<'a>> for Queryable<'a, T> { +impl<'a, T> UndeclarableSealed<(), QueryableUndeclaration<'a>> for Queryable<'a, T> { fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> { - Undeclarable::undeclare_inner(self.queryable, ()) + UndeclarableSealed::undeclare_inner(self.queryable, ()) } } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index ed1c75d3f2..7727999158 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -449,24 +449,40 @@ impl fmt::Debug for SessionRef<'_> { } } -/// A trait implemented by types that can be undeclared. -pub trait Undeclarable> +pub(crate) trait UndeclarableSealed> where O: Resolve + Send, { fn undeclare_inner(self, session: S) -> O; } -impl<'a, O, T, G> Undeclarable<&'a Session, O, T> for G +impl<'a, O, T, G> UndeclarableSealed<&'a Session, O, T> for G where O: Resolve + Send, - G: Undeclarable<(), O, T>, + G: UndeclarableSealed<(), O, T>, { fn undeclare_inner(self, _: &'a Session) -> O { self.undeclare_inner(()) } } +// NOTE: `UndeclarableInner` is only pub(crate) to hide the `undeclare_inner` method. So we don't +// care about the `private_bounds` lint in this particular case. +#[allow(private_bounds)] +/// A trait implemented by types that can be undeclared. +pub trait Undeclarable: UndeclarableSealed +where + O: Resolve + Send, +{ +} + +impl Undeclarable for U +where + O: Resolve + Send, + U: UndeclarableSealed, +{ +} + /// A zenoh session. /// pub struct Session { @@ -623,7 +639,7 @@ impl Session { O: Resolve>, T: Undeclarable<&'a Self, O, ZResult<()>>, { - Undeclarable::undeclare_inner(decl, self) + UndeclarableSealed::undeclare_inner(decl, self) } /// Get the current configuration of the zenoh [`Session`](Session). 
diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index f3c1fa14e7..2d14cc1adb 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -29,7 +29,7 @@ use super::{ handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, sample::{Locality, Sample}, - session::{SessionRef, Undeclarable}, + session::{SessionRef, UndeclarableSealed}, Id, }; @@ -105,11 +105,11 @@ impl<'a> SubscriberInner<'a> { /// ``` #[inline] pub fn undeclare(self) -> SubscriberUndeclaration<'a> { - Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } } -impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { +impl<'a> UndeclarableSealed<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> { SubscriberUndeclaration { subscriber: self } } @@ -521,9 +521,9 @@ impl<'a, Handler> Subscriber<'a, Handler> { } } -impl<'a, T> Undeclarable<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> { +impl<'a, T> UndeclarableSealed<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> { fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> { - Undeclarable::undeclare_inner(self.subscriber, ()) + UndeclarableSealed::undeclare_inner(self.subscriber, ()) } } From ae1d109968f60c2001e692ac928c60f307c62335 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Tue, 20 Aug 2024 18:22:30 +0800 Subject: [PATCH 582/598] refactor: remove the unnecessary `Option` in the z_liveliness example for clarity (#1316) --- examples/examples/z_liveliness.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 7bc8e857fe..bf8890a267 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -26,16 +26,14 @@ async fn main() { let session = zenoh::open(config).await.unwrap(); println!("Declaring LivelinessToken on 
'{}'...", &key_expr); - let mut token = Some(session.liveliness().declare_token(&key_expr).await.unwrap()); + let token = session.liveliness().declare_token(&key_expr).await.unwrap(); println!("Press CTRL-C to undeclare LivelinessToken and quit..."); std::thread::park(); + // LivelinessTokens are automatically closed when dropped // Use the code below to manually undeclare it if needed - if let Some(token) = token.take() { - println!("Undeclaring LivelinessToken..."); - token.undeclare().await.unwrap(); - }; + token.undeclare().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] From 2d86a7311948e57e1a9683fd644988fcb7c4c750 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Tue, 20 Aug 2024 23:47:51 +0800 Subject: [PATCH 583/598] refactor: demostrate how to use a background subscriber in the example (#1315) --- examples/examples/z_sub_thr.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index fa78b4688a..78626d1d1d 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -78,7 +78,7 @@ fn main() { let key_expr = "test/thr"; let mut stats = Stats::new(n); - let _sub = session + session .declare_subscriber(key_expr) .callback_mut(move |_sample| { stats.increment(); @@ -87,7 +87,9 @@ fn main() { } }) .wait() - .unwrap(); + .unwrap() + // Make the subscriber run in background, until the session is closed. 
+ .background(); println!("Press CTRL-C to quit..."); std::thread::park(); From 1696e755198e1ffe01851f7ea5d5d0e8ebe5745c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 21 Aug 2024 09:17:41 +0200 Subject: [PATCH 584/598] make reliability feature unstable (#1317) * make reliability feature unstable * clippy fix --- zenoh/src/api/session.rs | 7 +++++-- zenoh/src/api/subscriber.rs | 18 +++++++++++++----- zenoh/src/lib.rs | 1 + zenoh/tests/session.rs | 7 +++++-- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 7727999158..451c1340ad 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -39,8 +39,7 @@ use zenoh_protocol::network::{ use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, EntityId, ExprId, Parameters, Reliability, WireExpr, - EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Parameters, WireExpr, EMPTY_EXPR_ID, }, network::{ self, @@ -102,6 +101,8 @@ use crate::net::{ routing::dispatcher::face::Face, runtime::{Runtime, RuntimeBuilder}, }; +#[cfg(feature = "unstable")] +use crate::pubsub::Reliability; zconfigurable! 
{ pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; @@ -377,6 +378,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { SubscriberBuilder { session: self.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), + #[cfg(feature = "unstable")] reliability: Reliability::DEFAULT, origin: Locality::default(), handler: DefaultHandler::default(), @@ -2035,6 +2037,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { SubscriberBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), + #[cfg(feature = "unstable")] reliability: Reliability::DEFAULT, origin: Locality::default(), handler: DefaultHandler::default(), diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 2d14cc1adb..0e82a20331 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -20,7 +20,7 @@ use std::{ }; use zenoh_core::{Resolvable, Wait}; -use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; +use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; use zenoh_result::ZResult; #[cfg(feature = "unstable")] use {zenoh_config::wrappers::EntityGlobalId, zenoh_protocol::core::EntityGlobalIdProto}; @@ -32,6 +32,8 @@ use super::{ session::{SessionRef, UndeclarableSealed}, Id, }; +#[cfg(feature = "unstable")] +use crate::pubsub::Reliability; pub(crate) struct SubscriberState { pub(crate) id: Id, @@ -200,8 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(feature = "unstable")] pub reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) reliability: Reliability, #[cfg(feature = "unstable")] pub origin: Locality, @@ -239,16 +239,16 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { let SubscriberBuilder { session, key_expr, + #[cfg(feature = "unstable")] reliability, - origin, handler: _, } = self; SubscriberBuilder { session, key_expr, + #[cfg(feature = "unstable")] reliability, - 
origin, handler: callback, } @@ -312,6 +312,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { let SubscriberBuilder { session, key_expr, + #[cfg(feature = "unstable")] reliability, origin, handler: _, @@ -319,6 +320,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { SubscriberBuilder { session, key_expr, + #[cfg(feature = "unstable")] reliability, origin, handler, @@ -329,6 +331,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { /// Change the subscription reliability. #[inline] + #[zenoh_macros::unstable] pub fn reliability(mut self, reliability: Reliability) -> Self { self.reliability = reliability; self @@ -336,6 +339,7 @@ impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { /// Change the subscription reliability to `Reliable`. #[inline] + #[zenoh_macros::unstable] pub fn reliable(mut self) -> Self { self.reliability = Reliability::Reliable; self @@ -343,6 +347,7 @@ impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { /// Change the subscription reliability to `BestEffort`. 
#[inline] + #[zenoh_macros::unstable] pub fn best_effort(mut self) -> Self { self.reliability = Reliability::BestEffort; self @@ -381,9 +386,12 @@ where &key_expr, self.origin, callback, + #[cfg(feature = "unstable")] &SubscriberInfo { reliability: self.reliability, }, + #[cfg(not(feature = "unstable"))] + &SubscriberInfo::default(), ) .map(|sub_state| Subscriber { subscriber: SubscriberInner { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 02c90ce0ec..0190acc319 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -230,6 +230,7 @@ pub mod bytes { /// Pub/sub primitives pub mod pubsub { + #[zenoh_macros::unstable] pub use zenoh_protocol::core::Reliability; #[zenoh_macros::unstable] diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 916b0c4fb3..7515eefc49 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -21,11 +21,14 @@ use std::{ #[cfg(feature = "internal")] use zenoh::internal::runtime::{Runtime, RuntimeBuilder}; +#[cfg(feature = "unstable")] +use zenoh::pubsub::Reliability; use zenoh::{ - config, key_expr::KeyExpr, prelude::*, pubsub::Reliability, qos::CongestionControl, - sample::SampleKind, Session, + config, key_expr::KeyExpr, prelude::*, qos::CongestionControl, sample::SampleKind, Session, }; use zenoh_core::ztimeout; +#[cfg(not(feature = "unstable"))] +use zenoh_protocol::core::Reliability; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 8b027e90799ce693cc5e181a48e6b831f3f18b82 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 22 Aug 2024 10:35:39 +0200 Subject: [PATCH 585/598] Fix liveliness bug (#1321) --- zenoh/src/net/routing/hat/linkstate_peer/token.rs | 14 +++++++------- zenoh/src/net/routing/hat/p2p_peer/token.rs | 15 ++++++++------- zenoh/src/net/routing/hat/router/token.rs | 12 ++++++------ 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs 
b/zenoh/src/net/routing/hat/linkstate_peer/token.rs index 0fa65481cc..27f1e1370b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/token.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -294,10 +294,10 @@ fn simple_tokens(res: &Arc) -> Vec> { } #[inline] -fn remote_simple_tokens(res: &Arc, face: &Arc) -> bool { +fn remote_simple_tokens(tables: &Tables, res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() - .any(|ctx| ctx.face.id != face.id && ctx.token) + .any(|ctx| (ctx.face.id != face.id || face.zid == tables.zid) && ctx.token) } #[inline] @@ -375,7 +375,7 @@ fn propagate_forget_simple_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_simple_tokens(&m, &face) + && (remote_simple_tokens(tables, &m, &face) || remote_linkstatepeer_tokens(tables, &m)) }) }) { @@ -530,7 +530,7 @@ pub(super) fn undeclare_simple_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_simple_tokens(&m, face) + && (remote_simple_tokens(tables, &m, face) || remote_linkstatepeer_tokens(tables, &m)) }) }) { @@ -643,7 +643,7 @@ pub(crate) fn declare_token_interest( if hat!(tables).linkstatepeer_tokens.iter().any(|token| { token.context.is_some() && token.matches(res) - && (remote_simple_tokens(token, face) + && (remote_simple_tokens(tables, token, face) || remote_linkstatepeer_tokens(tables, token)) }) { let id = if mode.future() { @@ -672,7 +672,7 @@ pub(crate) fn declare_token_interest( for token in &hat!(tables).linkstatepeer_tokens { if token.context.is_some() && token.matches(res) - && (remote_simple_tokens(token, face) + && (remote_simple_tokens(tables, token, face) || remote_linkstatepeer_tokens(tables, token)) { let id = if mode.future() { @@ -702,7 +702,7 @@ pub(crate) fn declare_token_interest( } else { for token in &hat!(tables).linkstatepeer_tokens { if token.context.is_some() - && (remote_simple_tokens(token, face) + && 
(remote_simple_tokens(tables, token, face) || remote_linkstatepeer_tokens(tables, token)) { let id = if mode.future() { diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs index 539599d2a2..fa996433db 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/token.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -33,13 +33,13 @@ use crate::net::routing::{ #[inline] fn propagate_simple_token_to( - _tables: &mut Tables, + tables: &mut Tables, dst_face: &mut Arc, res: &Arc, src_face: &mut Arc, send_declare: &mut SendDeclare, ) { - if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) + if (src_face.id != dst_face.id || dst_face.zid == tables.zid) && !face_hat!(dst_face).local_tokens.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { @@ -173,10 +173,10 @@ fn simple_tokens(res: &Arc) -> Vec> { } #[inline] -fn remote_simple_tokens(res: &Arc, face: &Arc) -> bool { +fn remote_simple_tokens(tables: &Tables, res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() - .any(|ctx| ctx.face.id != face.id && ctx.token) + .any(|ctx| (ctx.face.id != face.id || face.zid == tables.zid) && ctx.token) } fn propagate_forget_simple_token( @@ -234,7 +234,7 @@ fn propagate_forget_simple_token( { if !res.context().matches.iter().any(|m| { m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_simple_tokens(&m, &face)) + .is_some_and(|m| m.context.is_some() && remote_simple_tokens(tables, &m, &face)) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { send_declare( @@ -332,8 +332,9 @@ pub(super) fn undeclare_simple_token( .collect::>>() { if !res.context().matches.iter().any(|m| { - m.upgrade() - .is_some_and(|m| m.context.is_some() && remote_simple_tokens(&m, face)) + m.upgrade().is_some_and(|m| { + m.context.is_some() && remote_simple_tokens(tables, &m, face) + }) }) { if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { 
send_declare( diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index 5677901987..1be66cb70b 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -88,7 +88,7 @@ fn propagate_simple_token_to( full_peer_net: bool, send_declare: &mut SendDeclare, ) { - if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) + if (src_face.id != dst_face.id || dst_face.zid == tables.zid) && !face_hat!(dst_face).local_tokens.contains_key(res) && if full_peer_net { dst_face.whatami == WhatAmI::Client @@ -335,10 +335,10 @@ fn simple_tokens(res: &Arc) -> Vec> { } #[inline] -fn remote_simple_tokens(res: &Arc, face: &Arc) -> bool { +fn remote_simple_tokens(tables: &Tables, res: &Arc, face: &Arc) -> bool { res.session_ctxs .values() - .any(|ctx| ctx.face.id != face.id && ctx.token) + .any(|ctx| (ctx.face.id != face.id || face.zid == tables.zid) && ctx.token) } #[inline] @@ -446,7 +446,7 @@ fn propagate_forget_simple_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_simple_tokens(&m, &face) + && (remote_simple_tokens(tables, &m, &face) || remote_linkstatepeer_tokens(tables, &m) || remote_router_tokens(tables, &m)) }) @@ -729,7 +729,7 @@ pub(super) fn undeclare_simple_token( if !res.context().matches.iter().any(|m| { m.upgrade().is_some_and(|m| { m.context.is_some() - && (remote_simple_tokens(&m, face) + && (remote_simple_tokens(tables, &m, face) || remote_linkstatepeer_tokens(tables, &m) || remote_router_tokens(tables, &m)) }) @@ -966,7 +966,7 @@ pub(crate) fn declare_token_interest( if hat!(tables).router_tokens.iter().any(|token| { token.context.is_some() && token.matches(res) - && (remote_simple_tokens(token, face) + && (remote_simple_tokens(tables, token, face) || remote_linkstatepeer_tokens(tables, token) || remote_router_tokens(tables, token)) }) { From c61a0beca73fac39031fd494e8b003680bed317b Mon Sep 17 
00:00:00 2001 From: brianPA <80439594+brian049@users.noreply.github.com> Date: Mon, 5 Aug 2024 22:59:54 +0800 Subject: [PATCH 586/598] fix: typos (#1297) --- DEFAULT_CONFIG.json5 | 2 +- commons/zenoh-config/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 27af64ef93..cb61b35a31 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -388,7 +388,7 @@ /// Configure the zenoh RX parameters of a link rx: { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b7b63e1602..743bde178a 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -441,7 +441,7 @@ validated_struct::validator! { }, pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. 
From c817e65561308d0225f1264ab951e45c51f8fe8b Mon Sep 17 00:00:00 2001 From: Tiago Neves <32251249+anhaabaete@users.noreply.github.com> Date: Mon, 12 Aug 2024 11:15:05 -0300 Subject: [PATCH 587/598] zenohd-default config error #1292 (#1298) * Zenohd panic when tring load file When zenohd trying load file, if it have a problem it crash cause another treat was "unwrap", and it return to a type config. So, it crash and cause painic. * zenohd default config error #1292 When tring load config file defined by -c option. With haver any problema "unwrap" has been to Config type. I treat it return a Default Config whe it happen * If file fail when try load configs If file fail when try load configs * Update main.rs * Resolve typos at comment Resolve typos at comment --- zenohd/src/main.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 9ce0a64333..18abb72354 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -120,7 +120,11 @@ fn config_from_args(args: &Args) -> Config { .config .as_ref() .map_or_else(Config::default, |conf_file| { - Config::from_file(conf_file).unwrap() + Config::from_file(conf_file).unwrap_or_else(|e| { + // if file load fail, wanning it, and load default config + tracing::warn!("Warn: File {} not found! 
{}", conf_file, e.to_string()); + Config::default() + }) }); if config.mode().is_none() { From 11811f973f1e60d43c4d9714d0c3b8f6d6d3bf24 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 9 Aug 2024 16:29:31 +0200 Subject: [PATCH 588/598] Replace trees computation tasks with a worker (#1303) * Replace trees computation tasks with a worker * Address review comments * Remove review comments From 78019083c8aa29afa5bda84888f4d5e58bf625b1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 27 Aug 2024 16:54:14 +0200 Subject: [PATCH 589/598] revering fix https://github.com/eclipse-zenoh/zenoh/pull/1298 --- zenohd/src/main.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 18abb72354..9ce0a64333 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -120,11 +120,7 @@ fn config_from_args(args: &Args) -> Config { .config .as_ref() .map_or_else(Config::default, |conf_file| { - Config::from_file(conf_file).unwrap_or_else(|e| { - // if file load fail, wanning it, and load default config - tracing::warn!("Warn: File {} not found! {}", conf_file, e.to_string()); - Config::default() - }) + Config::from_file(conf_file).unwrap() }); if config.mode().is_none() { From 6f43f95ffbf5eb593112e41f6d34447688b46662 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 27 Aug 2024 17:22:53 +0200 Subject: [PATCH 590/598] Merge main into dev/1.0.0 (#1326) * Add NOTE for LowLatency transport. 
(#1088) Signed-off-by: ChenYing Kuo * fix: Improve debug messages in `zenoh-transport` (#1090) * fix: Improve debug messages for failing RX/TX tasks * fix: Improve debug message for `accept_link` timeout * chore: Fix `clippy::redundant_pattern_matching` error * Improve pipeline backoff (#1097) * Yield task for backoff * Improve comments and error handling in backoff * Simplify pipeline pull * Consider backoff configuration * Add typos check to CI (#1065) * Fix typos * Add typos check to CI * Start link tx_task before notifying router (#1098) * Fix typos (#1110) * bump quinn & rustls (#1086) * bump quinn & rustls * fix ci windows check * add comments * Fix interface name scanning when listening on IP unspecified for TCP/TLS/QUIC/WS (#1123) Co-authored-by: Julien Enoch * Enable releasing from any branch (#1136) * Fix cargo clippy (#1145) * Release tables locks before propagating subscribers and queryables declarations to void dead locks (#1150) * Send simple sub and qabl declarations using a given function * Send simple sub and qabl declarations after releasing tables lock * Send simple sub and qabl declarations after releasing tables lock (missing places) * feat: make `TerminatableTask` terminate itself when dropped (#1151) * Fix bug in keyexpr::includes leading to call get_unchecked on empty array UB (#1208) * REST plugin uses unbounded flume channels for queries (#1213) * fix: typo in selector.rs (#1228) * fix: zenohd --cfg (#1263) * fix: zenohd --cfg * ci: trigger * Update zenohd/src/main.rs --------- Co-authored-by: Luca Cominardi * Fix failover brokering bug reacting to linkstate changes (#1272) * Change missleading log * Fix failover brokering bug reacting to linkstate changes * Retrigger CI --------- Co-authored-by: Luca Cominardi * Code format * Fix clippy warnings * Code format * Fix Clippy errors from Rust 1.80 (#1273) * Allow unexpected `doc_auto_cfg` flag * Keep never-constructed logger interceptor * Ignore interior mutability of `Resource` * Fix typo * 
Resolve `clippy::doc-lazy-continuation` errors * Upgrade `time@0.3.28` to `time@0.3.36` See https://github.com/time-rs/time/issues/693 * Update Cargo.toml (#1277) Updated description to be aligned with what we use everywhere else * fix: typos (#1297) * Replace trees computation tasks with a worker (#1303) * Replace trees computation tasks with a worker * Address review comments * Remove review comments * zenohd-default config error #1292 (#1298) * Zenohd panic when tring load file When zenohd trying load file, if it have a problem it crash cause another treat was "unwrap", and it return to a type config. So, it crash and cause painic. * zenohd default config error #1292 When tring load config file defined by -c option. With haver any problema "unwrap" has been to Config type. I treat it return a Default Config whe it happen * If file fail when try load configs If file fail when try load configs * Update main.rs * Resolve typos at comment Resolve typos at comment * fix: typos (#1297) * zenohd-default config error #1292 (#1298) * Zenohd panic when tring load file When zenohd trying load file, if it have a problem it crash cause another treat was "unwrap", and it return to a type config. So, it crash and cause painic. * zenohd default config error #1292 When tring load config file defined by -c option. With haver any problema "unwrap" has been to Config type. 
I treat it return a Default Config whe it happen * If file fail when try load configs If file fail when try load configs * Update main.rs * Resolve typos at comment Resolve typos at comment * Replace trees computation tasks with a worker (#1303) * Replace trees computation tasks with a worker * Address review comments * Remove review comments * revering fix https://github.com/eclipse-zenoh/zenoh/pull/1298 --------- Signed-off-by: ChenYing Kuo Co-authored-by: ChenYing Kuo (CY) Co-authored-by: Mahmoud Mazouz Co-authored-by: Luca Cominardi Co-authored-by: Tavo Annus Co-authored-by: JLer Co-authored-by: Julien Enoch Co-authored-by: OlivierHecart Co-authored-by: Yuyuan Yuan Co-authored-by: Diogo Matsubara Co-authored-by: OlivierHecart Co-authored-by: kydos Co-authored-by: brianPA <80439594+brian049@users.noreply.github.com> Co-authored-by: Tiago Neves <32251249+anhaabaete@users.noreply.github.com> --- DEFAULT_CONFIG.json5 | 2 +- commons/zenoh-config/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 27af64ef93..cb61b35a31 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -388,7 +388,7 @@ /// Configure the zenoh RX parameters of a link rx: { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b7b63e1602..743bde178a 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -441,7 +441,7 @@ validated_struct::validator! 
{ }, pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// The default the rx_buffer_size value is the same as the default batch size: 65535. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. From a98971a523ce281c9eb0758e8a240421c12476c6 Mon Sep 17 00:00:00 2001 From: hakanlundvall Date: Tue, 27 Aug 2024 17:43:30 +0200 Subject: [PATCH 591/598] Upgrade stabby dependency (#1328) --- Cargo.lock | 14 ++++++-------- Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1de420de99..c198dffcb3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4022,22 +4022,20 @@ dependencies = [ [[package]] name = "stabby" -version = "5.0.1" +version = "36.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7708f5b0e8bddba162d20fa10c8d17c31a2ec6bba369f7904bb18a8bde49ba2" +checksum = "311d6bcf0070c462ff626122ec2246f42bd2acd44b28908eedbfd07d500c7d99" dependencies = [ - "lazy_static", "rustversion", "stabby-abi", ] [[package]] name = "stabby-abi" -version = "5.0.1" +version = "36.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a6e7a8b2ff2c116bfab6afcce0adec14509eb38fd3f231bb97826d01de4021e" +checksum = "e6daae1a0707399f56d27fce7f212e50e31d215112a447e1bbcd837ae1bf5f49" dependencies = [ - "libc", "rustversion", "sha2-const-stable", "stabby-macros", @@ -4045,9 +4043,9 @@ dependencies = [ [[package]] name = "stabby-macros" -version = "5.0.1" +version = "36.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db97bd3101fab9929a08fa0138d30d46c7a80b9d32bc8a3a00706ba00358a275" +checksum = "43cf89a0cc9131279235baf8599b0e073fbcb096419204de0cc5d1a48ae73f74" 
dependencies = [ "proc-macro-crate", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index e2aac0cb40..035b329efa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -157,7 +157,7 @@ serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" static_init = "1.0.3" -stabby = "5.0.1" +stabby = "36.1.1" sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" From 469e9d11266fea38279205ec1cc91529fa9977a1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 27 Aug 2024 18:15:42 +0200 Subject: [PATCH 592/598] Revert "Merge main into dev/1.0.0 (#1326)" This reverts commit 6f43f95ffbf5eb593112e41f6d34447688b46662. --- DEFAULT_CONFIG.json5 | 2 +- commons/zenoh-config/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index cb61b35a31..27af64ef93 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -388,7 +388,7 @@ /// Configure the zenoh RX parameters of a link rx: { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65535. + /// The default the rx_buffer_size value is the same as the default batch size: 65335. /// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 743bde178a..b7b63e1602 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -441,7 +441,7 @@ validated_struct::validator! { }, pub rx: LinkRxConf { /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65535. + /// The default the rx_buffer_size value is the same as the default batch size: 65335. 
/// For very high throughput scenarios, the rx_buffer_size can be increased to accommodate /// more in-flight data. This is particularly relevant when dealing with large messages. /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. From 1d4c5c8f9a14a623df191330326656e17d54a47e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 28 Aug 2024 08:48:29 +0200 Subject: [PATCH 593/598] Fix liveliness bug (#1327) * Fix liveliness bug * Don't duplicate token undeclarations in peers for clients * Don't duplicate token declarations in linksstate peers for clients --- zenoh/src/net/routing/hat/linkstate_peer/token.rs | 9 ++++++--- zenoh/src/net/routing/hat/p2p_peer/token.rs | 11 +++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs b/zenoh/src/net/routing/hat/linkstate_peer/token.rs index 27f1e1370b..c87866577b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/token.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -81,13 +81,16 @@ fn send_sourced_token_to_net_clildren( #[inline] fn propagate_simple_token_to( - _tables: &mut Tables, + tables: &mut Tables, dst_face: &mut Arc, res: &Arc, - _src_face: &mut Arc, + src_face: &mut Arc, send_declare: &mut SendDeclare, ) { - if !face_hat!(dst_face).local_tokens.contains_key(res) && dst_face.whatami == WhatAmI::Client { + if (src_face.id != dst_face.id || dst_face.zid == tables.zid) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && dst_face.whatami == WhatAmI::Client + { if dst_face.whatami != WhatAmI::Client { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs index fa996433db..9ef7034fd0 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/token.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -182,6 +182,7 @@ fn remote_simple_tokens(tables: 
&Tables, res: &Arc, face: &Arc, + src_face: &Arc, send_declare: &mut SendDeclare, ) { for mut face in tables.faces.values().cloned() { @@ -202,9 +203,11 @@ fn propagate_forget_simple_token( res.expr(), ), ); - } else if face_hat!(face).remote_interests.values().any(|(r, o)| { - o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() - }) { + } else if src_face.id != face.id + && face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() + }) + { // Token has never been declared on this face. // Send an Undeclare with a one shot generated id and a WireExpr ext. send_declare( @@ -301,7 +304,7 @@ pub(super) fn undeclare_simple_token( let mut simple_tokens = simple_tokens(res); if simple_tokens.is_empty() { - propagate_forget_simple_token(tables, res, send_declare); + propagate_forget_simple_token(tables, res, face, send_declare); } if simple_tokens.len() == 1 { From 6dea3bf95a909d73411bdcb6da4211c9c32cb201 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 28 Aug 2024 11:40:39 +0200 Subject: [PATCH 594/598] Fix wire_expr mapping (#1330) * Fix wire_expr mapping * Fix find and replace error --- zenoh/src/net/routing/dispatcher/resource.rs | 18 +++++++---- zenoh/src/net/routing/hat/client/interests.rs | 6 ++-- zenoh/src/net/routing/hat/client/pubsub.rs | 2 +- zenoh/src/net/routing/hat/client/queries.rs | 2 +- zenoh/src/net/routing/hat/client/token.rs | 10 +++--- .../net/routing/hat/linkstate_peer/pubsub.rs | 18 +++++++---- .../net/routing/hat/linkstate_peer/queries.rs | 16 ++++++---- .../net/routing/hat/linkstate_peer/token.rs | 19 +++++++---- .../src/net/routing/hat/p2p_peer/interests.rs | 8 +++-- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 13 +++++--- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 10 +++--- zenoh/src/net/routing/hat/p2p_peer/token.rs | 13 +++++--- zenoh/src/net/routing/hat/router/interests.rs | 10 ++++-- 
zenoh/src/net/routing/hat/router/pubsub.rs | 30 +++++++++++------ zenoh/src/net/routing/hat/router/queries.rs | 24 +++++++++----- zenoh/src/net/routing/hat/router/token.rs | 32 ++++++++++++------- 16 files changed, 149 insertions(+), 82 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index f864c39049..01ff9b2817 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -461,7 +461,11 @@ impl Resource { } #[inline] - pub fn decl_key(res: &Arc, face: &mut Arc) -> WireExpr<'static> { + pub fn decl_key( + res: &Arc, + face: &mut Arc, + push: bool, + ) -> WireExpr<'static> { let (nonwild_prefix, wildsuffix) = Resource::nonwild_prefix(res); match nonwild_prefix { Some(mut nonwild_prefix) => { @@ -484,11 +488,13 @@ impl Resource { }; } } - if face.remote_key_interests.values().any(|res| { - res.as_ref() - .map(|res| res.matches(&nonwild_prefix)) - .unwrap_or(true) - }) { + if push + || face.remote_key_interests.values().any(|res| { + res.as_ref() + .map(|res| res.matches(&nonwild_prefix)) + .unwrap_or(true) + }) + { let ctx = get_mut_unchecked(&mut nonwild_prefix) .session_ctxs .entry(face.id) diff --git a/zenoh/src/net/routing/hat/client/interests.rs b/zenoh/src/net/routing/hat/client/interests.rs index b890e800f2..9347b3f0e5 100644 --- a/zenoh/src/net/routing/hat/client/interests.rs +++ b/zenoh/src/net/routing/hat/client/interests.rs @@ -53,7 +53,7 @@ pub(super) fn interests_new_face(tables: &mut Tables, face: &mut Arc) finalized: false, }, ); - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face, true)); face.primitives.send_interest(RoutingContext::with_expr( Interest { id, @@ -125,7 +125,9 @@ impl HatInterestTrait for HatCode { .insert(id, (interest.clone(), cancellation_token)); CurrentInterestCleanup::spawn_interest_clean_up_task(dst_face, tables_ref, id); 
} - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); + let wire_expr = res + .as_ref() + .map(|res| Resource::decl_key(res, dst_face, true)); dst_face.primitives.send_interest(RoutingContext::with_expr( Interest { id, diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 4edc9c98e6..5a19f3549c 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -56,7 +56,7 @@ fn propagate_simple_subscription_to( { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = Resource::decl_key(res, dst_face, true); send_declare( &dst_face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 7658a509da..e711ccf2e8 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -104,7 +104,7 @@ fn propagate_simple_queryable( face_hat_mut!(&mut dst_face) .local_qabls .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, &mut dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, true); send_declare( &dst_face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/client/token.rs b/zenoh/src/net/routing/hat/client/token.rs index 11fab10466..9e5923425c 100644 --- a/zenoh/src/net/routing/hat/client/token.rs +++ b/zenoh/src/net/routing/hat/client/token.rs @@ -45,7 +45,7 @@ fn propagate_simple_token_to( { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = Resource::decl_key(res, dst_face, true); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -120,7 +120,7 @@ fn 
declare_simple_token( propagate_simple_token(tables, res, face, send_declare); - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, true); if let Some(interest_id) = interest_id { if let Some((interest, _)) = face.pending_current_interests.get(&interest_id) { send_declare( @@ -312,7 +312,7 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, true); send_declare( &face.primitives, RoutingContext::with_expr( @@ -343,7 +343,7 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = Resource::decl_key(token, face, true); send_declare( &face.primitives, RoutingContext::with_expr( @@ -379,7 +379,7 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = Resource::decl_key(token, face, true); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 8b9d97872b..f1412ec807 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -66,7 +66,8 @@ fn send_sourced_subscription_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let key_expr = Resource::decl_key(res, &mut someface); + let push_declaration = someface.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -108,7 +109,7 @@ fn propagate_simple_subscription_to( if dst_face.whatami != WhatAmI::Client { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = 
Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -145,7 +146,8 @@ fn propagate_simple_subscription_to( if !face_hat!(dst_face).local_subs.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -352,7 +354,8 @@ fn send_forget_sourced_subscription_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let wire_expr = Resource::decl_key(res, &mut someface); + let push_declaration = someface.whatami != WhatAmI::Client; + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -702,7 +705,7 @@ pub(super) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -734,7 +737,8 @@ pub(super) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(sub, face); + let wire_expr = + Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -767,7 +771,7 @@ pub(super) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(sub, face); + let wire_expr = Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index de70cddf9b..6941466571 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ 
b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -138,7 +138,8 @@ fn send_sourced_queryable_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let key_expr = Resource::decl_key(res, &mut someface); + let push_declaration = someface.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -191,7 +192,8 @@ fn propagate_simple_queryable( face_hat_mut!(&mut dst_face) .local_qabls .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, &mut dst_face); + let push_declaration = dst_face.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -369,7 +371,8 @@ fn send_forget_sourced_queryable_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let wire_expr = Resource::decl_key(res, &mut someface); + let push_declaration = someface.whatami != WhatAmI::Client; + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -767,7 +770,7 @@ pub(super) fn declare_qabl_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -803,7 +806,8 @@ pub(super) fn declare_qabl_interest( } else { 0 }; - let key_expr = Resource::decl_key(qabl, face); + let key_expr = + Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -839,7 +843,7 @@ pub(super) fn declare_qabl_interest( } else { 0 }; - let key_expr = Resource::decl_key(qabl, face); + let key_expr = Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, 
RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs b/zenoh/src/net/routing/hat/linkstate_peer/token.rs index c87866577b..6e3ea08492 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/token.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -54,7 +54,8 @@ fn send_sourced_token_to_net_clildren( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let key_expr = Resource::decl_key(res, &mut someface); + let push_declaration = someface.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -94,7 +95,7 @@ fn propagate_simple_token_to( if dst_face.whatami != WhatAmI::Client { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -128,7 +129,8 @@ fn propagate_simple_token_to( if !face_hat!(dst_face).local_tokens.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -320,7 +322,8 @@ fn send_forget_sourced_token_to_net_clildren( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let wire_expr = Resource::decl_key(res, &mut someface); + let push_declaration = someface.whatami != WhatAmI::Client; + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -656,7 +659,7 @@ pub(crate) fn declare_token_interest( } 
else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -685,7 +688,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -715,7 +719,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/p2p_peer/interests.rs b/zenoh/src/net/routing/hat/p2p_peer/interests.rs index 068cdd0eeb..2ed9e22840 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/interests.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/interests.rs @@ -57,7 +57,9 @@ pub(super) fn interests_new_face(tables: &mut Tables, face: &mut Arc) finalized: false, }, ); - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + let wire_expr = res + .as_ref() + .map(|res| Resource::decl_key(res, face, face.whatami != WhatAmI::Client)); face.primitives.send_interest(RoutingContext::with_expr( Interest { id, @@ -152,7 +154,9 @@ impl HatInterestTrait for HatCode { .insert(id, (interest.clone(), cancellation_token)); CurrentInterestCleanup::spawn_interest_clean_up_task(dst_face, tables_ref, id); } - let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, dst_face)); + let wire_expr = res + .as_ref() + .map(|res| Resource::decl_key(res, dst_face, dst_face.whatami == WhatAmI::Client)); dst_face.primitives.send_interest(RoutingContext::with_expr( Interest { id, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 12a1e67186..0dccf9ba3c 100644 --- 
a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -60,7 +60,7 @@ fn propagate_simple_subscription_to( if dst_face.whatami != WhatAmI::Client { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -97,7 +97,8 @@ fn propagate_simple_subscription_to( if !face_hat!(dst_face).local_subs.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -432,7 +433,7 @@ pub(super) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -468,7 +469,8 @@ pub(super) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(sub, face); + let wire_expr = + Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -509,7 +511,8 @@ pub(super) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(sub, face); + let wire_expr = + Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 87b6372dae..2fd6d6fa81 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ 
b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -112,7 +112,7 @@ fn propagate_simple_queryable_to( face_hat_mut!(dst_face) .local_qabls .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -411,7 +411,7 @@ pub(super) fn declare_qabl_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -450,7 +450,8 @@ pub(super) fn declare_qabl_interest( } else { 0 }; - let key_expr = Resource::decl_key(qabl, face); + let key_expr = + Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -493,7 +494,8 @@ pub(super) fn declare_qabl_interest( } else { 0 }; - let key_expr = Resource::decl_key(qabl, face); + let key_expr = + Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs index 9ef7034fd0..866737f0df 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/token.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -46,7 +46,7 @@ fn propagate_simple_token_to( if dst_face.whatami != WhatAmI::Client { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -80,7 +80,8 @@ fn propagate_simple_token_to( if !face_hat!(dst_face).local_tokens.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, 
Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -427,7 +428,7 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -458,7 +459,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( @@ -494,7 +496,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/router/interests.rs b/zenoh/src/net/routing/hat/router/interests.rs index fcd0269fcc..33bb3ddf6b 100644 --- a/zenoh/src/net/routing/hat/router/interests.rs +++ b/zenoh/src/net/routing/hat/router/interests.rs @@ -24,8 +24,8 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; use super::{ - face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, - token::declare_token_interest, HatCode, HatFace, + face_hat_mut, hat, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, HatTables, }; use crate::net::routing::{ dispatcher::{ @@ -112,3 +112,9 @@ impl HatInterestTrait for HatCode { face_hat_mut!(face).remote_interests.remove(&id); } } + +#[inline] +pub(super) fn push_declaration_profile(tables: &Tables, face: &FaceState) -> bool { + face.whatami == WhatAmI::Client + || (face.whatami 
== WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer)) +} diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 506c85888c..cc0251a07a 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -32,7 +32,8 @@ use zenoh_sync::get_mut_unchecked; use super::{ face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, - network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, + interests::push_declaration_profile, network::Network, res_hat, res_hat_mut, HatCode, + HatContext, HatFace, HatTables, }; #[cfg(feature = "unstable")] use crate::key_expr::KeyExpr; @@ -66,7 +67,8 @@ fn send_sourced_subscription_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let key_expr = Resource::decl_key(res, &mut someface); + let push_declaration = push_declaration_profile(tables, &someface); + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -129,7 +131,8 @@ fn propagate_simple_subscription_to( if !face_hat!(dst_face).local_subs.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = + Resource::decl_key(res, dst_face, push_declaration_profile(tables, dst_face)); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -384,7 +387,8 @@ fn send_forget_sourced_subscription_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let wire_expr = Resource::decl_key(res, &mut someface); + let push_declaration = push_declaration_profile(tables, &someface); + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -883,15 +887,18 @@ 
pub(super) fn pubsub_linkstate_change( } } - for dst_face in tables.faces.values_mut() { + for mut dst_face in tables.faces.values().cloned() { if src_face.id != dst_face.id && HatTables::failover_brokering_to(links, dst_face.zid) { for res in face_hat!(src_face).remote_subs.values() { if !face_hat!(dst_face).local_subs.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + face_hat_mut!(&mut dst_face) + .local_subs + .insert(res.clone(), id); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; @@ -950,7 +957,8 @@ pub(crate) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = + Resource::decl_key(res, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( @@ -995,7 +1003,8 @@ pub(crate) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(sub, face); + let wire_expr = + Resource::decl_key(sub, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( @@ -1038,7 +1047,8 @@ pub(crate) fn declare_sub_interest( } else { 0 }; - let wire_expr = Resource::decl_key(sub, face); + let wire_expr = + Resource::decl_key(sub, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index d706435179..f45a260288 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -39,7 +39,8 @@ use 
zenoh_sync::get_mut_unchecked; use super::{ face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, - network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, + interests::push_declaration_profile, network::Network, res_hat, res_hat_mut, HatCode, + HatContext, HatFace, HatTables, }; use crate::net::routing::{ dispatcher::{ @@ -206,7 +207,8 @@ fn send_sourced_queryable_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let key_expr = Resource::decl_key(res, &mut someface); + let push_declaration = push_declaration_profile(tables, &someface); + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -272,7 +274,8 @@ fn propagate_simple_queryable( face_hat_mut!(&mut dst_face) .local_qabls .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, &mut dst_face); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -515,7 +518,8 @@ fn send_forget_sourced_queryable_to_net_children( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let wire_expr = Resource::decl_key(res, &mut someface); + let push_declaration = push_declaration_profile(tables, &someface); + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -999,7 +1003,8 @@ pub(super) fn queries_linkstate_change( face_hat_mut!(&mut dst_face) .local_qabls .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, &mut dst_face); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -1161,7 +1166,8 @@ 
pub(crate) fn declare_qabl_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = + Resource::decl_key(res, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( @@ -1206,7 +1212,8 @@ pub(crate) fn declare_qabl_interest( } else { 0 }; - let key_expr = Resource::decl_key(qabl, face); + let key_expr = + Resource::decl_key(qabl, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( @@ -1244,7 +1251,8 @@ pub(crate) fn declare_qabl_interest( } else { 0 }; - let key_expr = Resource::decl_key(qabl, face); + let key_expr = + Resource::decl_key(qabl, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs index 1be66cb70b..f94a4d12d4 100644 --- a/zenoh/src/net/routing/hat/router/token.rs +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -27,8 +27,9 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; use super::{ - face_hat, face_hat_mut, get_peer, get_router, hat, hat_mut, network::Network, res_hat, - res_hat_mut, HatCode, HatContext, HatFace, HatTables, + face_hat, face_hat_mut, get_peer, get_router, hat, hat_mut, + interests::push_declaration_profile, network::Network, res_hat, res_hat_mut, HatCode, + HatContext, HatFace, HatTables, }; use crate::net::routing::{ dispatcher::{face::FaceState, tables::Tables}, @@ -54,7 +55,8 @@ fn send_sourced_token_to_net_clildren( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let key_expr = Resource::decl_key(res, &mut someface); + let push_declaration = push_declaration_profile(tables, &someface); + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -115,7 +117,8 @@ fn propagate_simple_token_to( if 
!face_hat!(dst_face).local_tokens.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + let key_expr = + Resource::decl_key(res, dst_face, push_declaration_profile(tables, dst_face)); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -358,7 +361,8 @@ fn send_forget_sourced_token_to_net_clildren( .map(|src_face| someface.id != src_face.id) .unwrap_or(true) { - let wire_expr = Resource::decl_key(res, &mut someface); + let push_declaration = push_declaration_profile(tables, &someface); + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -914,15 +918,18 @@ pub(super) fn token_linkstate_change( } } - for dst_face in tables.faces.values_mut() { + for mut dst_face in tables.faces.values().cloned() { if src_face.id != dst_face.id && HatTables::failover_brokering_to(links, dst_face.zid) { for res in face_hat!(src_face).remote_tokens.values() { if !face_hat!(dst_face).local_tokens.contains_key(res) { let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); + face_hat_mut!(&mut dst_face) + .local_tokens + .insert(res.clone(), id); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); send_declare( &dst_face.primitives, RoutingContext::with_expr( @@ -977,7 +984,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(res, face); + let wire_expr = + Resource::decl_key(res, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( @@ -1021,7 +1029,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = 
Resource::decl_key(token, face); + let wire_expr = + Resource::decl_key(token, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( @@ -1063,7 +1072,8 @@ pub(crate) fn declare_token_interest( } else { 0 }; - let wire_expr = Resource::decl_key(token, face); + let wire_expr = + Resource::decl_key(token, face, push_declaration_profile(tables, face)); send_declare( &face.primitives, RoutingContext::with_expr( From 7f7d648e76c46f0584d56405c5f162892278d610 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 28 Aug 2024 15:14:25 +0200 Subject: [PATCH 595/598] Improve pipeline jitter (#1335) * Rework of pipeline backoff * Cargo fmt * Fic backoff calculation * Fix backoff calculation * Fix lint * Add event tests * Improve event tests * Update event API * Improve event tests * Precommit * Fix event wait_timeout and wait_deadline impls * Add event_deadline tests * Pre-commit * Update batching config * Fix typos * Address review comments --- Cargo.lock | 2 + Cargo.toml | 1 + DEFAULT_CONFIG.json5 | 15 +- commons/zenoh-config/src/defaults.rs | 20 +- commons/zenoh-config/src/lib.rs | 17 +- commons/zenoh-shm/src/header/storage.rs | 2 +- commons/zenoh-shm/src/header/subscription.rs | 5 +- commons/zenoh-shm/src/watchdog/confirmator.rs | 5 +- commons/zenoh-shm/src/watchdog/storage.rs | 2 +- commons/zenoh-sync/src/event.rs | 622 ++++++++++++++++++ commons/zenoh-sync/src/lib.rs | 3 + io/zenoh-transport/Cargo.toml | 2 + io/zenoh-transport/src/common/pipeline.rs | 217 +++--- io/zenoh-transport/src/manager.rs | 34 +- io/zenoh-transport/src/multicast/link.rs | 4 +- .../src/unicast/universal/link.rs | 4 +- 16 files changed, 803 insertions(+), 152 deletions(-) create mode 100644 commons/zenoh-sync/src/event.rs diff --git a/Cargo.lock b/Cargo.lock index c198dffcb3..32a5b96661 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6100,9 +6100,11 @@ name = "zenoh-transport" version = "1.0.0-dev" dependencies = [ "async-trait", + 
"crossbeam-utils", "flume", "futures", "futures-util", + "lazy_static", "lz4_flex", "paste", "rand 0.8.5", diff --git a/Cargo.toml b/Cargo.toml index 035b329efa..e78b692612 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,7 @@ console-subscriber = "0.3.0" const_format = "0.2.30" crc = "3.0.1" criterion = "0.5" +crossbeam-utils = "0.8.2" derive_more = "0.99.17" derive-new = "0.6.0" tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index cb61b35a31..0bca1c92c1 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -353,8 +353,6 @@ /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). /// The default batch size value is the maximum batch size: 65535. batch_size: 65535, - /// Perform batching of messages if they are smaller of the batch_size - batching: true, /// Each zenoh link has a transmission queue that can be configured queue: { /// The size of each priority queue indicates the number of batches a given queue can contain. @@ -380,9 +378,16 @@ /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. wait_before_drop: 1000, }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. - backoff: 100, + /// Perform batching of messages if they are smaller of the batch_size + batching: { + /// Perform adaptive batching of messages if they are smaller of the batch_size. + /// When the network is detected to not be fast enough to transmit every message individually, many small messages may be + /// batched together and sent all at once on the wire reducing the overall network overhead. This is typically of a high-throughput + /// scenario mainly composed of small messages. In other words, batching is activated by the network back-pressure. 
+ enabled: true, + /// The maximum time limit (in ms) a message should be retained for batching when back-pressure happens. + time_limit: 1, + } }, }, /// Configure the zenoh RX parameters of a link diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index c6e69dd148..cc6bf5854a 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -191,17 +191,6 @@ impl Default for LinkTxConf { batch_size: BatchSize::MAX, queue: QueueConf::default(), threads: num, - batching: true, - } - } -} - -impl Default for QueueConf { - fn default() -> Self { - Self { - size: QueueSizeConf::default(), - congestion_control: CongestionControlConf::default(), - backoff: 100, } } } @@ -234,6 +223,15 @@ impl Default for CongestionControlConf { } } +impl Default for BatchingConf { + fn default() -> Self { + BatchingConf { + enabled: true, + time_limit: 1, + } + } +} + impl Default for LinkRxConf { fn default() -> Self { Self { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 743bde178a..2d9ba0beee 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -407,9 +407,8 @@ validated_struct::validator! { keep_alive: usize, /// Zenoh's MTU equivalent (default: 2^16-1) (max: 2^16-1) batch_size: BatchSize, - /// Perform batching of messages if they are smaller of the batch_size - batching: bool, - pub queue: QueueConf { + pub queue: #[derive(Default)] + QueueConf { /// The size of each priority queue indicates the number of batches a given queue can contain. /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, @@ -432,9 +431,15 @@ validated_struct::validator! { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. 
pub wait_before_drop: u64, }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. - backoff: u64, + pub batching: BatchingConf { + /// Perform adaptive batching of messages if they are smaller of the batch_size. + /// When the network is detected to not be fast enough to transmit every message individually, many small messages may be + /// batched together and sent all at once on the wire reducing the overall network overhead. This is typically of a high-throughput + /// scenario mainly composed of small messages. In other words, batching is activated by the network back-pressure. + enabled: bool, + /// The maximum time limit (in ms) a message should be retained for batching when back-pressure happens. + time_limit: u64, + }, }, // Number of threads used for TX threads: usize, diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs index 7d4c06cd2a..db556937d0 100644 --- a/commons/zenoh-shm/src/header/storage.rs +++ b/commons/zenoh-shm/src/header/storage.rs @@ -25,7 +25,7 @@ use super::{ segment::HeaderSegment, }; -#[dynamic(lazy,drop)] +#[dynamic(lazy, drop)] pub static mut GLOBAL_HEADER_STORAGE: HeaderStorage = HeaderStorage::new(32768usize).unwrap(); pub struct HeaderStorage { diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs index 6259877302..6f92960aaa 100644 --- a/commons/zenoh-shm/src/header/subscription.rs +++ b/commons/zenoh-shm/src/header/subscription.rs @@ -24,9 +24,8 @@ use super::{ segment::HeaderSegment, }; -#[dynamic(lazy,drop)] - pub static mut GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); - +#[dynamic(lazy, drop)] +pub static mut GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); pub struct Subscription { linked_table: Mutex>>, diff --git 
a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs index 9d87adfb97..1a9ac0f04f 100644 --- a/commons/zenoh-shm/src/watchdog/confirmator.rs +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -27,10 +27,9 @@ use super::{ segment::Segment, }; -#[dynamic(lazy,drop)] +#[dynamic(lazy, drop)] pub static mut GLOBAL_CONFIRMATOR: WatchdogConfirmator = - WatchdogConfirmator::new(Duration::from_millis(50)); - + WatchdogConfirmator::new(Duration::from_millis(50)); pub struct ConfirmedDescriptor { pub owned: OwnedDescriptor, diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs index 48fa4cde40..ff9772961c 100644 --- a/commons/zenoh-shm/src/watchdog/storage.rs +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -21,7 +21,7 @@ use zenoh_result::{zerror, ZResult}; use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; -#[dynamic(lazy,drop)] +#[dynamic(lazy, drop)] pub static mut GLOBAL_STORAGE: WatchdogStorage = WatchdogStorage::new(32768usize).unwrap(); pub struct WatchdogStorage { diff --git a/commons/zenoh-sync/src/event.rs b/commons/zenoh-sync/src/event.rs new file mode 100644 index 0000000000..f1aa5b5b69 --- /dev/null +++ b/commons/zenoh-sync/src/event.rs @@ -0,0 +1,622 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + fmt, + sync::{ + atomic::{AtomicU16, AtomicU8, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +use event_listener::{Event as EventLib, Listener}; + +// Error types +const WAIT_ERR_STR: &str = "No notifier available"; +pub struct WaitError; + +impl fmt::Display for WaitError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl fmt::Debug for WaitError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(WAIT_ERR_STR) + } +} + +impl std::error::Error for WaitError {} + +#[repr(u8)] +pub enum WaitDeadlineError { + Deadline, + WaitError, +} + +impl fmt::Display for WaitDeadlineError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl fmt::Debug for WaitDeadlineError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Deadline => f.write_str("Deadline reached"), + Self::WaitError => f.write_str(WAIT_ERR_STR), + } + } +} + +impl std::error::Error for WaitDeadlineError {} + +#[repr(u8)] +pub enum WaitTimeoutError { + Timeout, + WaitError, +} + +impl fmt::Display for WaitTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl fmt::Debug for WaitTimeoutError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Timeout => f.write_str("Timeout expired"), + Self::WaitError => f.write_str(WAIT_ERR_STR), + } + } +} + +impl std::error::Error for WaitTimeoutError {} + +const NOTIFY_ERR_STR: &str = "No waiter available"; +pub struct NotifyError; + +impl fmt::Display for NotifyError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl fmt::Debug for NotifyError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(NOTIFY_ERR_STR) 
+ } +} + +impl std::error::Error for NotifyError {} + +// Inner +struct EventInner { + event: EventLib, + flag: AtomicU8, + notifiers: AtomicU16, + waiters: AtomicU16, +} + +const UNSET: u8 = 0; +const OK: u8 = 1; +const ERR: u8 = 1 << 1; + +#[repr(u8)] +enum EventCheck { + Unset = UNSET, + Ok = OK, + Err = ERR, +} + +#[repr(u8)] +enum EventSet { + Ok = OK, + Err = ERR, +} + +impl EventInner { + fn check(&self) -> EventCheck { + let f = self.flag.fetch_and(!OK, Ordering::SeqCst); + if f & ERR != 0 { + return EventCheck::Err; + } + if f == OK { + return EventCheck::Ok; + } + EventCheck::Unset + } + + fn set(&self) -> EventSet { + let f = self.flag.fetch_or(OK, Ordering::SeqCst); + if f & ERR != 0 { + return EventSet::Err; + } + EventSet::Ok + } + + fn err(&self) { + self.flag.store(ERR, Ordering::SeqCst); + } +} + +/// Creates a new lock-free event variable. Every time a [`Notifier`] calls ['Notifier::notify`], one [`Waiter`] will be waken-up. +/// If no waiter is waiting when the `notify` is called, the notification will not be lost. That means the next waiter will return +/// immediately when calling `wait`. +pub fn new() -> (Notifier, Waiter) { + let inner = Arc::new(EventInner { + event: EventLib::new(), + flag: AtomicU8::new(UNSET), + notifiers: AtomicU16::new(1), + waiters: AtomicU16::new(1), + }); + (Notifier(inner.clone()), Waiter(inner)) +} + +/// A [`Notifier`] is used to notify and wake up one and only one [`Waiter`]. +#[repr(transparent)] +pub struct Notifier(Arc); + +impl Notifier { + /// Notifies one pending listener + #[inline] + pub fn notify(&self) -> Result<(), NotifyError> { + // Set the flag. 
+ match self.0.set() { + EventSet::Ok => { + self.0.event.notify_additional_relaxed(1); + Ok(()) + } + EventSet::Err => Err(NotifyError), + } + } +} + +impl Clone for Notifier { + fn clone(&self) -> Self { + let n = self.0.notifiers.fetch_add(1, Ordering::SeqCst); + // Panic on overflow + assert!(n != 0); + Self(self.0.clone()) + } +} + +impl Drop for Notifier { + fn drop(&mut self) { + let n = self.0.notifiers.fetch_sub(1, Ordering::SeqCst); + if n == 1 { + // The last Notifier has been dropped, close the event and notify everyone + self.0.err(); + self.0.event.notify(usize::MAX); + } + } +} + +#[repr(transparent)] +pub struct Waiter(Arc); + +impl Waiter { + /// Waits for the condition to be notified + #[inline] + pub async fn wait_async(&self) -> Result<(), WaitError> { + // Wait until the flag is set. + loop { + // Check the flag. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitError), + } + + // Start listening for events. + let listener = self.0.event.listen(); + + // Check the flag again after creating the listener. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitError), + } + + // Wait for a notification and continue the loop. + listener.await; + } + + Ok(()) + } + + /// Waits for the condition to be notified + #[inline] + pub fn wait(&self) -> Result<(), WaitError> { + // Wait until the flag is set. + loop { + // Check the flag. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitError), + } + + // Start listening for events. + let listener = self.0.event.listen(); + + // Check the flag again after creating the listener. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitError), + } + + // Wait for a notification and continue the loop. 
+ listener.wait(); + } + + Ok(()) + } + + /// Waits for the condition to be notified or returns an error when the deadline is reached + #[inline] + pub fn wait_deadline(&self, deadline: Instant) -> Result<(), WaitDeadlineError> { + // Wait until the flag is set. + loop { + // Check the flag. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitDeadlineError::WaitError), + } + + // Start listening for events. + let listener = self.0.event.listen(); + + // Check the flag again after creating the listener. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitDeadlineError::WaitError), + } + + // Wait for a notification and continue the loop. + if listener.wait_deadline(deadline).is_none() { + return Err(WaitDeadlineError::Deadline); + } + } + + Ok(()) + } + + /// Waits for the condition to be notified or returns an error when the timeout is expired + #[inline] + pub fn wait_timeout(&self, timeout: Duration) -> Result<(), WaitTimeoutError> { + // Wait until the flag is set. + loop { + // Check the flag. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitTimeoutError::WaitError), + } + + // Start listening for events. + let listener = self.0.event.listen(); + + // Check the flag again after creating the listener. + match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitTimeoutError::WaitError), + } + + // Wait for a notification and continue the loop. 
+ if listener.wait_timeout(timeout).is_none() { + return Err(WaitTimeoutError::Timeout); + } + } + + Ok(()) + } +} + +impl Clone for Waiter { + fn clone(&self) -> Self { + let n = self.0.waiters.fetch_add(1, Ordering::Relaxed); + // Panic on overflow + assert!(n != 0); + Self(self.0.clone()) + } +} + +impl Drop for Waiter { + fn drop(&mut self) { + let n = self.0.waiters.fetch_sub(1, Ordering::SeqCst); + if n == 1 { + // The last Waiter has been dropped, close the event + self.0.err(); + } + } +} + +mod tests { + #[test] + fn event_timeout() { + use std::{ + sync::{Arc, Barrier}, + time::Duration, + }; + + use crate::WaitTimeoutError; + + let barrier = Arc::new(Barrier::new(2)); + let (notifier, waiter) = super::new(); + let tslot = Duration::from_secs(1); + + let bs = barrier.clone(); + let s = std::thread::spawn(move || { + // 1 - Wait one notification + match waiter.wait_timeout(tslot) { + Ok(()) => {} + Err(WaitTimeoutError::Timeout) => panic!("Timeout {:#?}", tslot), + Err(WaitTimeoutError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 2 - Being notified twice but waiting only once + bs.wait(); + + match waiter.wait_timeout(tslot) { + Ok(()) => {} + Err(WaitTimeoutError::Timeout) => panic!("Timeout {:#?}", tslot), + Err(WaitTimeoutError::WaitError) => panic!("Event closed"), + } + + match waiter.wait_timeout(tslot) { + Ok(()) => panic!("Event Ok but it should be Timeout"), + Err(WaitTimeoutError::Timeout) => {} + Err(WaitTimeoutError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 3 - Notifier has been dropped + bs.wait(); + + waiter.wait().unwrap_err(); + + bs.wait(); + }); + + let bp = barrier.clone(); + let p = std::thread::spawn(move || { + // 1 - Notify once + notifier.notify().unwrap(); + + bp.wait(); + + // 2 - Notify twice + notifier.notify().unwrap(); + notifier.notify().unwrap(); + + bp.wait(); + bp.wait(); + + // 3 - Drop notifier yielding an error in the waiter + drop(notifier); + + bp.wait(); + bp.wait(); + }); + 
+ s.join().unwrap(); + p.join().unwrap(); + } + + #[test] + fn event_deadline() { + use std::{ + sync::{Arc, Barrier}, + time::{Duration, Instant}, + }; + + use crate::WaitDeadlineError; + + let barrier = Arc::new(Barrier::new(2)); + let (notifier, waiter) = super::new(); + let tslot = Duration::from_secs(1); + + let bs = barrier.clone(); + let s = std::thread::spawn(move || { + // 1 - Wait one notification + match waiter.wait_deadline(Instant::now() + tslot) { + Ok(()) => {} + Err(WaitDeadlineError::Deadline) => panic!("Timeout {:#?}", tslot), + Err(WaitDeadlineError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 2 - Being notified twice but waiting only once + bs.wait(); + + match waiter.wait_deadline(Instant::now() + tslot) { + Ok(()) => {} + Err(WaitDeadlineError::Deadline) => panic!("Timeout {:#?}", tslot), + Err(WaitDeadlineError::WaitError) => panic!("Event closed"), + } + + match waiter.wait_deadline(Instant::now() + tslot) { + Ok(()) => panic!("Event Ok but it should be Timeout"), + Err(WaitDeadlineError::Deadline) => {} + Err(WaitDeadlineError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 3 - Notifier has been dropped + bs.wait(); + + waiter.wait().unwrap_err(); + + bs.wait(); + }); + + let bp = barrier.clone(); + let p = std::thread::spawn(move || { + // 1 - Notify once + notifier.notify().unwrap(); + + bp.wait(); + + // 2 - Notify twice + notifier.notify().unwrap(); + notifier.notify().unwrap(); + + bp.wait(); + bp.wait(); + + // 3 - Drop notifier yielding an error in the waiter + drop(notifier); + + bp.wait(); + bp.wait(); + }); + + s.join().unwrap(); + p.join().unwrap(); + } + + #[test] + fn event_loop() { + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, Barrier, + }, + time::{Duration, Instant}, + }; + + const N: usize = 1_000; + static COUNTER: AtomicUsize = AtomicUsize::new(0); + + let (notifier, waiter) = super::new(); + let barrier = Arc::new(Barrier::new(2)); + + let bs = barrier.clone(); + 
let s = std::thread::spawn(move || { + for _ in 0..N { + waiter.wait().unwrap(); + COUNTER.fetch_add(1, Ordering::Relaxed); + bs.wait(); + } + }); + let p = std::thread::spawn(move || { + for _ in 0..N { + notifier.notify().unwrap(); + barrier.wait(); + } + }); + + let start = Instant::now(); + let tout = Duration::from_secs(60); + loop { + let n = COUNTER.load(Ordering::Relaxed); + if n == N { + break; + } + if start.elapsed() > tout { + panic!("Timeout {:#?}. Counter: {n}/{N}", tout); + } + + std::thread::sleep(Duration::from_millis(100)); + } + + s.join().unwrap(); + p.join().unwrap(); + } + + #[test] + fn event_multiple() { + use std::{ + sync::atomic::{AtomicUsize, Ordering}, + time::{Duration, Instant}, + }; + + const N: usize = 1_000; + static COUNTER: AtomicUsize = AtomicUsize::new(0); + + let (notifier, waiter) = super::new(); + + let w1 = waiter.clone(); + let s1 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.fetch_add(1, Ordering::Relaxed) < N - 2 { + w1.wait().unwrap(); + n += 1; + } + println!("S1: {}", n); + }); + let s2 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.fetch_add(1, Ordering::Relaxed) < N - 2 { + waiter.wait().unwrap(); + n += 1; + } + println!("S2: {}", n); + }); + + let n1 = notifier.clone(); + let p1 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.load(Ordering::Relaxed) < N { + n1.notify().unwrap(); + n += 1; + std::thread::sleep(Duration::from_millis(1)); + } + println!("P1: {}", n); + }); + let p2 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.load(Ordering::Relaxed) < N { + notifier.notify().unwrap(); + n += 1; + std::thread::sleep(Duration::from_millis(1)); + } + println!("P2: {}", n); + }); + + std::thread::spawn(move || { + let start = Instant::now(); + let tout = Duration::from_secs(60); + loop { + let n = COUNTER.load(Ordering::Relaxed); + if n == N { + break; + } + if start.elapsed() > tout { + panic!("Timeout {:#?}. 
Counter: {n}/{N}", tout); + } + + std::thread::sleep(Duration::from_millis(100)); + } + }); + + p1.join().unwrap(); + p2.join().unwrap(); + + s1.join().unwrap(); + s2.join().unwrap(); + } +} diff --git a/commons/zenoh-sync/src/lib.rs b/commons/zenoh-sync/src/lib.rs index 20e95d2bb8..8289b29fbb 100644 --- a/commons/zenoh-sync/src/lib.rs +++ b/commons/zenoh-sync/src/lib.rs @@ -25,6 +25,9 @@ use std::{ use futures::FutureExt; +pub mod event; +pub use event::*; + pub mod fifo_queue; pub use fifo_queue::*; diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index c1a2c9b8ae..a3dabbae0e 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -52,6 +52,7 @@ default = ["test", "transport_multilink"] [dependencies] async-trait = { workspace = true } +crossbeam-utils = { workspace = true } tokio = { workspace = true, features = [ "sync", "fs", @@ -61,6 +62,7 @@ tokio = { workspace = true, features = [ "io-util", "net", ] } +lazy_static = { workspace = true } tokio-util = { workspace = true, features = ["rt"]} flume = { workspace = true } tracing = {workspace = true} diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 68a4b87d24..60ea3b215d 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -1,12 +1,25 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// use std::{ sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU32, Ordering}, Arc, Mutex, MutexGuard, }, time::{Duration, Instant}, }; -use flume::{bounded, Receiver, Sender}; +use crossbeam_utils::CachePadded; use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; use zenoh_buffers::{ reader::{HasReader, Reader}, @@ -25,35 +38,19 @@ use zenoh_protocol::{ AtomicBatchSize, BatchSize, TransportMessage, }, }; +use zenoh_sync::{event, Notifier, Waiter}; -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// use super::{ batch::{Encode, WBatch}, priority::{TransportChannelTx, TransportPriorityTx}, }; use crate::common::batch::BatchConfig; -// It's faster to work directly with nanoseconds. -// Backoff will never last more the u32::MAX nanoseconds. -type NanoSeconds = u32; - const RBLEN: usize = QueueSizeConf::MAX; // Inner structure to reuse serialization batches struct StageInRefill { - n_ref_r: Receiver<()>, + n_ref_r: Waiter, s_ref_r: RingBufferReader, } @@ -63,36 +60,48 @@ impl StageInRefill { } fn wait(&self) -> bool { - self.n_ref_r.recv().is_ok() + self.n_ref_r.wait().is_ok() } fn wait_deadline(&self, instant: Instant) -> bool { - self.n_ref_r.recv_deadline(instant).is_ok() + self.n_ref_r.wait_deadline(instant).is_ok() } } +lazy_static::lazy_static! 
{ + static ref LOCAL_EPOCH: Instant = Instant::now(); +} + +type AtomicMicroSeconds = AtomicU32; +type MicroSeconds = u32; + +struct AtomicBackoff { + active: CachePadded, + bytes: CachePadded, + first_write: CachePadded, +} + // Inner structure to link the initial stage with the final stage of the pipeline struct StageInOut { - n_out_w: Sender<()>, + n_out_w: Notifier, s_out_w: RingBufferWriter, - bytes: Arc, - backoff: Arc, + atomic_backoff: Arc, } impl StageInOut { #[inline] fn notify(&self, bytes: BatchSize) { - self.bytes.store(bytes, Ordering::Relaxed); - if !self.backoff.load(Ordering::Relaxed) { - let _ = self.n_out_w.try_send(()); + self.atomic_backoff.bytes.store(bytes, Ordering::Relaxed); + if !self.atomic_backoff.active.load(Ordering::Relaxed) { + let _ = self.n_out_w.notify(); } } #[inline] fn move_batch(&mut self, batch: WBatch) { let _ = self.s_out_w.push(batch); - self.bytes.store(0, Ordering::Relaxed); - let _ = self.n_out_w.try_send(()); + self.atomic_backoff.bytes.store(0, Ordering::Relaxed); + let _ = self.n_out_w.notify(); } } @@ -145,6 +154,7 @@ impl StageIn { None => match self.s_ref.pull() { Some(mut batch) => { batch.clear(); + self.s_out.atomic_backoff.first_write.store(LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds, Ordering::Relaxed); break batch; } None => { @@ -299,6 +309,10 @@ impl StageIn { None => match self.s_ref.pull() { Some(mut batch) => { batch.clear(); + self.s_out.atomic_backoff.first_write.store( + LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds, + Ordering::Relaxed, + ); break batch; } None => { @@ -333,7 +347,6 @@ impl StageIn { // Get the current serialization batch. 
let mut batch = zgetbatch_rets!(); // Attempt the serialization on the current batch - // Attempt the serialization on the current batch match batch.encode(&msg) { Ok(_) => zretok!(batch), Err(_) => { @@ -354,54 +367,27 @@ impl StageIn { enum Pull { Some(WBatch), None, - Backoff(NanoSeconds), + Backoff(MicroSeconds), } // Inner structure to keep track and signal backoff operations #[derive(Clone)] struct Backoff { - tslot: NanoSeconds, - retry_time: NanoSeconds, + threshold: Duration, last_bytes: BatchSize, - bytes: Arc, - backoff: Arc, + atomic: Arc, + // active: bool, } impl Backoff { - fn new(tslot: NanoSeconds, bytes: Arc, backoff: Arc) -> Self { + fn new(threshold: Duration, atomic: Arc) -> Self { Self { - tslot, - retry_time: 0, + threshold, last_bytes: 0, - bytes, - backoff, - } - } - - fn next(&mut self) { - if self.retry_time == 0 { - self.retry_time = self.tslot; - self.backoff.store(true, Ordering::Relaxed); - } else { - match self.retry_time.checked_mul(2) { - Some(rt) => { - self.retry_time = rt; - } - None => { - self.retry_time = NanoSeconds::MAX; - tracing::warn!( - "Pipeline pull backoff overflow detected! 
Retrying in {}ns.", - self.retry_time - ); - } - } + atomic, + // active: false, } } - - fn reset(&mut self) { - self.retry_time = 0; - self.backoff.store(false, Ordering::Relaxed); - } } // Inner structure to link the final stage with the initial stage of the pipeline @@ -422,13 +408,38 @@ impl StageOutIn { } fn try_pull_deep(&mut self) -> Pull { - let new_bytes = self.backoff.bytes.load(Ordering::Relaxed); - let old_bytes = self.backoff.last_bytes; - self.backoff.last_bytes = new_bytes; + // Verify first backoff is not active + let mut pull = !self.backoff.atomic.active.load(Ordering::Relaxed); - if new_bytes == old_bytes { + // If backoff is active, verify the current number of bytes is equal to the old number + // of bytes seen in the previous backoff iteration + if !pull { + let new_bytes = self.backoff.atomic.bytes.load(Ordering::Relaxed); + let old_bytes = self.backoff.last_bytes; + self.backoff.last_bytes = new_bytes; + + pull = new_bytes == old_bytes; + } + + // Verify that we have not been doing backoff for too long + let mut backoff = 0; + if !pull { + let diff = LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds + - self.backoff.atomic.first_write.load(Ordering::Relaxed); + let threshold = self.backoff.threshold.as_micros() as MicroSeconds; + + if diff >= threshold { + pull = true; + } else { + backoff = threshold - diff; + } + } + + if pull { // It seems no new bytes have been written on the batch, try to pull if let Ok(mut g) = self.current.try_lock() { + self.backoff.atomic.active.store(false, Ordering::Relaxed); + // First try to pull from stage OUT to make sure we are not in the case // where new_bytes == old_bytes are because of two identical serializations if let Some(batch) = self.s_out_r.pull() { @@ -445,24 +456,25 @@ impl StageOutIn { } } } - // Go to backoff } + // Activate backoff + self.backoff.atomic.active.store(true, Ordering::Relaxed); + // Do backoff - self.backoff.next(); - Pull::Backoff(self.backoff.retry_time) + 
Pull::Backoff(backoff) } } struct StageOutRefill { - n_ref_w: Sender<()>, + n_ref_w: Notifier, s_ref_w: RingBufferWriter, } impl StageOutRefill { fn refill(&mut self, batch: WBatch) { assert!(self.s_ref_w.push(batch).is_none()); - let _ = self.n_ref_w.try_send(()); + let _ = self.n_ref_w.notify(); } } @@ -501,8 +513,8 @@ pub(crate) struct TransmissionPipelineConf { pub(crate) batch: BatchConfig, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) wait_before_drop: Duration, - pub(crate) batching: bool, - pub(crate) backoff: Duration, + pub(crate) batching_enabled: bool, + pub(crate) batching_time_limit: Duration, } // A 2-stage transmission pipeline @@ -525,7 +537,7 @@ impl TransmissionPipeline { // Create the channel for notifying that new batches are in the out ring buffer // This is a MPSC channel - let (n_out_w, n_out_r) = bounded(1); + let (n_out_w, n_out_r) = event::new(); for (prio, num) in size_iter.enumerate() { assert!(*num != 0 && *num <= RBLEN); @@ -540,29 +552,33 @@ impl TransmissionPipeline { } // Create the channel for notifying that new batches are in the refill ring buffer // This is a SPSC channel - let (n_ref_w, n_ref_r) = bounded(1); + let (n_ref_w, n_ref_r) = event::new(); // Create the refill ring buffer // This is a SPSC ring buffer let (s_out_w, s_out_r) = RingBuffer::::init(); let current = Arc::new(Mutex::new(None)); - let bytes = Arc::new(AtomicBatchSize::new(0)); - let backoff = Arc::new(AtomicBool::new(false)); + let bytes = Arc::new(AtomicBackoff { + active: CachePadded::new(AtomicBool::new(false)), + bytes: CachePadded::new(AtomicBatchSize::new(0)), + first_write: CachePadded::new(AtomicMicroSeconds::new( + LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds, + )), + }); stage_in.push(Mutex::new(StageIn { s_ref: StageInRefill { n_ref_r, s_ref_r }, s_out: StageInOut { n_out_w: n_out_w.clone(), s_out_w, - bytes: bytes.clone(), - backoff: backoff.clone(), + atomic_backoff: bytes.clone(), }, mutex: StageInMutex { current: 
current.clone(), priority: priority[prio].clone(), }, fragbuf: ZBuf::empty(), - batching: config.batching, + batching: config.batching_enabled, })); // The stage out for this priority @@ -570,7 +586,7 @@ impl TransmissionPipeline { s_in: StageOutIn { s_out_r, current, - backoff: Backoff::new(config.backoff.as_nanos() as NanoSeconds, bytes, backoff), + backoff: Backoff::new(config.batching_time_limit, bytes), }, s_ref: StageOutRefill { n_ref_w, s_ref_w }, }); @@ -652,28 +668,23 @@ impl TransmissionPipelineProducer { pub(crate) struct TransmissionPipelineConsumer { // A single Mutex for all the priority queues stage_out: Box<[StageOut]>, - n_out_r: Receiver<()>, + n_out_r: Waiter, active: Arc, } impl TransmissionPipelineConsumer { pub(crate) async fn pull(&mut self) -> Option<(WBatch, usize)> { - // Reset backoff before pulling - for queue in self.stage_out.iter_mut() { - queue.s_in.backoff.reset(); - } - while self.active.load(Ordering::Relaxed) { + let mut backoff = MicroSeconds::MAX; // Calculate the backoff maximum - let mut bo = NanoSeconds::MAX; for (prio, queue) in self.stage_out.iter_mut().enumerate() { match queue.try_pull() { Pull::Some(batch) => { return Some((batch, prio)); } - Pull::Backoff(b) => { - if b < bo { - bo = b; + Pull::Backoff(deadline) => { + if deadline < backoff { + backoff = deadline; } } Pull::None => {} @@ -687,9 +698,11 @@ impl TransmissionPipelineConsumer { tokio::task::yield_now().await; // Wait for the backoff to expire or for a new message - let res = - tokio::time::timeout(Duration::from_nanos(bo as u64), self.n_out_r.recv_async()) - .await; + let res = tokio::time::timeout( + Duration::from_micros(backoff as u64), + self.n_out_r.wait_async(), + ) + .await; match res { Ok(Ok(())) => { // We have received a notification from the channel that some bytes are available, retry to pull. 
@@ -774,9 +787,9 @@ mod tests { is_compression: true, }, queue_size: [1; Priority::NUM], - batching: true, + batching_enabled: true, wait_before_drop: Duration::from_millis(1), - backoff: Duration::from_micros(1), + batching_time_limit: Duration::from_micros(1), }; const CONFIG_NOT_STREAMED: TransmissionPipelineConf = TransmissionPipelineConf { @@ -787,9 +800,9 @@ mod tests { is_compression: false, }, queue_size: [1; Priority::NUM], - batching: true, + batching_enabled: true, wait_before_drop: Duration::from_millis(1), - backoff: Duration::from_micros(1), + batching_time_limit: Duration::from_micros(1), }; #[tokio::test(flavor = "multi_thread", worker_threads = 4)] diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 669744838f..305ccab574 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -130,10 +130,10 @@ pub struct TransportManagerBuilder { whatami: WhatAmI, resolution: Resolution, batch_size: BatchSize, - batching: bool, + batching_enabled: bool, + batching_time_limit: Duration, wait_before_drop: Duration, queue_size: QueueSizeConf, - queue_backoff: Duration, defrag_buff_size: usize, link_rx_buffer_size: usize, unicast: TransportManagerBuilderUnicast, @@ -172,8 +172,13 @@ impl TransportManagerBuilder { self } - pub fn batching(mut self, batching: bool) -> Self { - self.batching = batching; + pub fn batching_enabled(mut self, batching_enabled: bool) -> Self { + self.batching_enabled = batching_enabled; + self + } + + pub fn batching_time_limit(mut self, batching_time_limit: Duration) -> Self { + self.batching_time_limit = batching_time_limit; self } @@ -187,11 +192,6 @@ impl TransportManagerBuilder { self } - pub fn queue_backoff(mut self, queue_backoff: Duration) -> Self { - self.queue_backoff = queue_backoff; - self - } - pub fn defrag_buff_size(mut self, defrag_buff_size: usize) -> Self { self.defrag_buff_size = defrag_buff_size; self @@ -238,14 +238,16 @@ impl TransportManagerBuilder 
{ resolution.set(Field::FrameSN, *link.tx().sequence_number_resolution()); self = self.resolution(resolution); self = self.batch_size(*link.tx().batch_size()); - self = self.batching(*link.tx().batching()); + self = self.batching_enabled(*link.tx().queue().batching().enabled()); + self = self.batching_time_limit(Duration::from_millis( + *link.tx().queue().batching().time_limit(), + )); self = self.defrag_buff_size(*link.rx().max_message_size()); self = self.link_rx_buffer_size(*link.rx().buffer_size()); self = self.wait_before_drop(Duration::from_micros( *link.tx().queue().congestion_control().wait_before_drop(), )); self = self.queue_size(link.tx().queue().size().clone()); - self = self.queue_backoff(Duration::from_nanos(*link.tx().queue().backoff())); self = self.tx_threads(*link.tx().threads()); self = self.protocols(link.protocols().clone()); @@ -301,10 +303,10 @@ impl TransportManagerBuilder { whatami: self.whatami, resolution: self.resolution, batch_size: self.batch_size, - batching: self.batching, + batching: self.batching_enabled, wait_before_drop: self.wait_before_drop, queue_size, - queue_backoff: self.queue_backoff, + queue_backoff: self.batching_time_limit, defrag_buff_size: self.defrag_buff_size, link_rx_buffer_size: self.link_rx_buffer_size, unicast: unicast.config, @@ -340,7 +342,7 @@ impl Default for TransportManagerBuilder { fn default() -> Self { let link_rx = LinkRxConf::default(); let queue = QueueConf::default(); - let backoff = *queue.backoff(); + let backoff = *queue.batching().time_limit(); let wait_before_drop = *queue.congestion_control().wait_before_drop(); Self { version: VERSION, @@ -348,10 +350,10 @@ impl Default for TransportManagerBuilder { whatami: zenoh_config::defaults::mode, resolution: Resolution::default(), batch_size: BatchSize::MAX, - batching: true, + batching_enabled: true, wait_before_drop: Duration::from_micros(wait_before_drop), queue_size: queue.size, - queue_backoff: Duration::from_nanos(backoff), + 
batching_time_limit: Duration::from_millis(backoff), defrag_buff_size: *link_rx.max_message_size(), link_rx_buffer_size: *link_rx.buffer_size(), endpoints: HashMap::new(), diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 90999d32ce..d0d5ef4fb0 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -321,8 +321,8 @@ impl TransportLinkMulticastUniversal { batch: self.link.config.batch, queue_size: self.transport.manager.config.queue_size, wait_before_drop: self.transport.manager.config.wait_before_drop, - batching: self.transport.manager.config.batching, - backoff: self.transport.manager.config.queue_backoff, + batching_enabled: self.transport.manager.config.batching, + batching_time_limit: self.transport.manager.config.queue_backoff, }; // The pipeline let (producer, consumer) = TransmissionPipeline::make(tpc, &priority_tx); diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index cc3afc06e5..fff842c255 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -62,8 +62,8 @@ impl TransportLinkUnicastUniversal { }, queue_size: transport.manager.config.queue_size, wait_before_drop: transport.manager.config.wait_before_drop, - batching: transport.manager.config.batching, - backoff: transport.manager.config.queue_backoff, + batching_enabled: transport.manager.config.batching, + batching_time_limit: transport.manager.config.queue_backoff, }; // The pipeline From 335e83cb01c6d271a620b52ab5ddac6b331415ce Mon Sep 17 00:00:00 2001 From: J-Loudet Date: Thu, 29 Aug 2024 10:23:41 +0200 Subject: [PATCH 596/598] refactor(storage-manager): move prefix related functions in crate (#1325) This change is motivated by the refactor of the Replication feature. 
In order to exchange metadata that can be processed by all Replicas, the key expressions associated with the data stored must be prefixed (when sent) and stripped (when received). This commit exposes two functions, at the `zenoh-plugin-storage-manager` crate, that perform these operations. The objective is to reuse these functions in the Replication refactor and, as we intend to move the Replication in its own crate, exposing them at the crate level makes it easier to then import them. * plugins/zenoh-plugin-storage-manager/src/lib.rs: - moved there the `strip_prefix` function, - moved there the `get_prefixed` function and renamed it to `prefix`. * plugins/zenoh-plugin-storage-manager/src/replica/mod.rs: updated the call to the previously named `get_prefixed` function. * plugins/zenoh-plugin-storage-manager/src/replica/storage.rs: - removed the `strip_prefix` method, - removed the `prefix` function, - updated the call to `strip_prefix` and `get_prefixed`. --- .../zenoh-plugin-storage-manager/src/lib.rs | 65 ++++++++++++++- .../src/replica/mod.rs | 5 +- .../src/replica/storage.rs | 80 ++++++------------- 3 files changed, 89 insertions(+), 61 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index fb578b198d..ac778f3633 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -22,6 +22,7 @@ use std::{ collections::HashMap, convert::TryFrom, + str::FromStr, sync::{Arc, Mutex}, }; @@ -35,7 +36,7 @@ use zenoh::{ runtime::Runtime, zlock, LibLoader, }, - key_expr::{keyexpr, KeyExpr}, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, prelude::Wait, session::Session, Result as ZResult, @@ -408,3 +409,65 @@ fn with_extended_string R>( prefix.truncate(prefix_len); result } + +/// Returns the key expression stripped of the provided prefix. +/// +/// If no prefix is provided this function returns the key expression untouched. 
+/// +/// If `None` is returned, it indicates that the key expression is equal to the prefix. +/// +/// This function will internally call [strip_prefix], see its documentation for possible outcomes. +/// +/// # Errors +/// +/// This function will return an error if: +/// - The provided prefix contains a wildcard. +/// NOTE: The configuration of a Storage is checked and will reject any prefix that contains a +/// wildcard. In theory, this error should never occur. +/// - The key expression is not prefixed by the provided prefix. +/// - The resulting stripped key is not a valid key expression (this should, in theory, never +/// happen). +/// +/// [strip_prefix]: zenoh::key_expr::keyexpr::strip_prefix() +pub fn strip_prefix( + maybe_prefix: Option<&OwnedKeyExpr>, + key_expr: &KeyExpr<'_>, +) -> ZResult> { + match maybe_prefix { + None => Ok(Some(key_expr.clone().into())), + Some(prefix) => { + if prefix.is_wild() { + bail!( + "Prefix < {} > contains a wild character (\"**\" or \"*\")", + prefix + ); + } + + match key_expr.strip_prefix(prefix).as_slice() { + [stripped_key_expr] => { + if stripped_key_expr.is_empty() { + return Ok(None); + } + + OwnedKeyExpr::from_str(stripped_key_expr).map(Some) + } + _ => bail!("Failed to strip prefix < {} > from: {}", prefix, key_expr), + } + } + } +} + +/// Returns the key with an additional prefix, if one was provided. +/// +/// If no prefix is provided, this function returns `maybe_stripped_key`. +/// +/// If a prefix is provided, this function returns the concatenation of both. 
+pub fn prefix( + maybe_prefix: Option<&OwnedKeyExpr>, + maybe_stripped_key: &OwnedKeyExpr, +) -> OwnedKeyExpr { + match maybe_prefix { + Some(prefix) => prefix / maybe_stripped_key, + None => maybe_stripped_key.clone(), + } +} diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index ecb8815153..4766914e21 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -94,10 +94,7 @@ impl Replica { } } else { result.push(( - StorageService::get_prefixed( - &storage_config.strip_prefix, - &entry.0.unwrap().into(), - ), + crate::prefix(storage_config.strip_prefix.as_ref(), &entry.0.unwrap()), entry.1, )); } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index d3e34f064c..d2147b137c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -25,7 +25,6 @@ use tokio::sync::{Mutex, RwLock}; use zenoh::{ bytes::EncodingBuilderTrait, internal::{ - bail, buffers::{SplitBuffer, ZBuf}, zenoh_home, Timed, TimedEvent, Timer, Value, }, @@ -39,7 +38,6 @@ use zenoh::{ sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, session::{Session, SessionDeclarations}, time::{Timestamp, NTP64}, - Result as ZResult, }; use zenoh_backend_traits::{ config::{GarbageCollectionConfig, StorageConfig}, @@ -342,7 +340,10 @@ impl StorageService { } }; - let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { + let stripped_key = match crate::strip_prefix( + self.strip_prefix.as_ref(), + sample_to_store.key_expr(), + ) { Ok(stripped) => stripped, Err(e) => { tracing::error!("{}", e); @@ -463,13 +464,14 @@ impl StorageService { if weight.is_some() && weight.unwrap().data.timestamp > *ts { // if the key matches a wild card update, check whether it was saved 
in storage // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - tracing::error!("{}", e); - break; - } - }; + let stripped_key = + match crate::strip_prefix(self.strip_prefix.as_ref(), &key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + tracing::error!("{}", e); + break; + } + }; let mut storage = self.storage.lock().await; match storage.get(stripped_key, "").await { Ok(stored_data) => { @@ -498,7 +500,7 @@ impl StorageService { async fn is_latest(&self, key_expr: &OwnedKeyExpr, timestamp: &Timestamp) -> bool { // @TODO: if cache exists, read from there let mut storage = self.storage.lock().await; - let stripped_key = match self.strip_prefix(&key_expr.into()) { + let stripped_key = match crate::strip_prefix(self.strip_prefix.as_ref(), &key_expr.into()) { Ok(stripped) => stripped, Err(e) => { tracing::error!("{}", e); @@ -529,14 +531,15 @@ impl StorageService { let matching_keys = self.get_matching_keys(q.key_expr()).await; let mut storage = self.storage.lock().await; for key in matching_keys { - let stripped_key = match self.strip_prefix(&key.clone().into()) { - Ok(k) => k, - Err(e) => { - tracing::error!("{}", e); - // @TODO: return error when it is supported - return; - } - }; + let stripped_key = + match crate::strip_prefix(self.strip_prefix.as_ref(), &key.clone().into()) { + Ok(k) => k, + Err(e) => { + tracing::error!("{}", e); + // @TODO: return error when it is supported + return; + } + }; match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { @@ -561,7 +564,7 @@ impl StorageService { } drop(storage); } else { - let stripped_key = match self.strip_prefix(q.key_expr()) { + let stripped_key = match crate::strip_prefix(self.strip_prefix.as_ref(), q.key_expr()) { Ok(k) => k, Err(e) => { tracing::error!("{}", e); @@ -603,7 +606,7 @@ impl StorageService { for (k, _ts) in entries { 
// @TODO: optimize adding back the prefix (possible inspiration from https://github.com/eclipse-zenoh/zenoh/blob/0.5.0-beta.9/backends/traits/src/utils.rs#L79) let full_key = match k { - Some(key) => StorageService::get_prefixed(&self.strip_prefix, &key.into()), + Some(key) => crate::prefix(self.strip_prefix.as_ref(), &key), None => self.strip_prefix.clone().unwrap(), }; if key_expr.intersects(&full_key.clone()) { @@ -620,41 +623,6 @@ impl StorageService { result } - fn strip_prefix(&self, key_expr: &KeyExpr<'_>) -> ZResult> { - let key = match &self.strip_prefix { - Some(prefix) => { - if key_expr.as_str().eq(prefix.as_str()) { - "" - } else { - match key_expr.strip_prefix(prefix).as_slice() { - [ke] => ke.as_str(), - _ => bail!( - "Keyexpr doesn't start with prefix '{}': '{}'", - prefix, - key_expr - ), - } - } - } - None => key_expr.as_str(), - }; - if key.is_empty() { - Ok(None) - } else { - Ok(Some(OwnedKeyExpr::new(key.to_string()).unwrap())) - } - } - - pub fn get_prefixed( - strip_prefix: &Option, - key_expr: &KeyExpr<'_>, - ) -> OwnedKeyExpr { - match strip_prefix { - Some(prefix) => prefix.join(key_expr.as_keyexpr()).unwrap(), - None => OwnedKeyExpr::from(key_expr.as_keyexpr()), - } - } - async fn initialize_if_empty(&mut self) { if self.replication.is_some() && self.replication.as_ref().unwrap().empty_start { // align with other storages, querying them on key_expr, From 3579f12e8d8d12b30305a9801c8dfda6b4d8ecc2 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 29 Aug 2024 11:51:55 +0200 Subject: [PATCH 597/598] Upgrade dependencies to latest version (#1338) * Upgrade dependencies * Fix dependencies * Fix winerror dependency --- Cargo.lock | 846 ++++++++---------- Cargo.toml | 108 +-- examples/Cargo.toml | 4 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 4 + 4 files changed, 435 insertions(+), 527 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 32a5b96661..bec69fa53f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -70,9 +70,9 @@ dependencies 
= [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if 1.0.0", "cipher 0.4.4", @@ -115,9 +115,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -179,9 +179,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "array-init" @@ -285,6 +285,18 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite 0.2.13", +] + [[package]] name = "async-dup" version = "1.2.2" @@ -297,30 +309,29 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.1" +version = "1.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" dependencies = [ - "async-lock", "async-task", "concurrent-queue", - "fastrand 1.9.0", - "futures-lite", + "fastrand 2.0.2", + "futures-lite 2.0.0", "slab", ] [[package]] name = "async-global-executor" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-executor", - "async-io", - "async-lock", + "async-io 2.3.3", + "async-lock 3.4.0", "blocking", - "futures-lite", + "futures-lite 2.0.0", "once_cell", "tokio", ] @@ -331,7 +342,7 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8101020758a4fc3a7c326cb42aa99e9fa77cbfb76987c128ad956406fe1f70a7" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-dup", "async-std", "futures-core", @@ -347,20 +358,39 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "cfg-if 1.0.0", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", + "polling 2.8.0", "rustix 0.37.25", "slab", "socket2 0.4.9", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +dependencies = [ + "async-lock 3.4.0", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-io", + "futures-lite 2.0.0", + "parking", + "polling 3.7.2", + "rustix 0.38.32", + 
"slab", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "async-lock" version = "2.8.0" @@ -370,19 +400,30 @@ dependencies = [ "event-listener 2.5.3", ] +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite 0.2.13", +] + [[package]] name = "async-process" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "autocfg", "blocking", "cfg-if 1.0.0", "event-listener 2.5.3", - "futures-lite", + "futures-lite 1.13.0", "rustix 0.37.25", "signal-hook", "windows-sys 0.48.0", @@ -415,7 +456,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53bba003996b8fd22245cd0c59b869ba764188ed435392cf2796d03b805ade10" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-std", "http-types", "log", @@ -429,16 +470,16 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", @@ -458,9 +499,9 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", @@ -604,17 +645,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.3.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel", - "async-lock", + "async-channel 2.3.1", "async-task", - "atomic-waker", - "fastrand 1.9.0", - "futures-lite", - "log", + "futures-io", + "futures-lite 2.0.0", + "piper", ] [[package]] @@ -637,9 +676,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cache-padded" @@ -686,6 +725,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.30" @@ -749,9 +794,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "4.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -759,9 +804,9 @@ dependencies = [ 
[[package]] name = "clap_builder" -version = "4.4.11" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstream", "anstyle", @@ -771,9 +816,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck", "proc-macro2", @@ -783,9 +828,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cobs" @@ -832,18 +877,18 @@ checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "const_format" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c990efc7a285731f9a4378d81aff2f0e85a2c8781a05ef0f8baa8dac54d0ff48" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ "proc-macro2", "quote", @@ -856,12 +901,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "cookie" version = "0.14.4" @@ -912,18 +951,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "criterion" @@ -997,12 +1036,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crypto-common" @@ -1085,9 +1121,9 @@ dependencies = [ [[package]] name = "derive-new" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" +checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc" dependencies = [ "proc-macro2", "quote", @@ -1096,15 +1132,22 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.17" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -1250,6 +1293,16 @@ dependencies = [ "pin-project-lite 0.2.13", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite 0.2.13", +] + [[package]] name = "fancy-regex" version = "0.13.0" @@ -1317,7 +1370,7 @@ dependencies = [ "futures-core", "futures-sink", "nanorand", - "spin 0.9.8", + "spin", ] [[package]] @@ -1362,9 +1415,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1377,9 +1430,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1387,15 +1440,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" 
+version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1404,9 +1457,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -1423,11 +1476,26 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c1155db57329dca6d018b61e76b1488ce9a2e5e44028cac420a5898f4fcef63" +dependencies = [ + "fastrand 2.0.2", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite 0.2.13", + "waker-fn", +] + [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", @@ -1436,21 +1504,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = 
"9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1516,24 +1584,22 @@ checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "git-version" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6b0decc02f4636b9ccad390dcbe77b722a77efedfa393caf8379a51d5c61899" +checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" dependencies = [ "git-version-macro", - "proc-macro-hack", ] [[package]] name = "git-version-macro" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe69f1cbdb6e28af2bac214e943b99ce8a0a06b447d15d3e61161b0423139f3f" +checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -1585,15 +1651,21 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hkdf" @@ -1636,11 +1708,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1695,11 +1767,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel", + "async-channel 1.9.0", "async-std", "base64 0.13.1", "cookie", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite 0.2.13", "rand 0.7.3", @@ -1745,7 +1817,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.5.6", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -1788,141 +1860,21 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - 
-[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - 
"stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - [[package]] name = "idna" -version = "1.0.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", - "smallvec", - "utf8_iter", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "indexmap" -version = "2.0.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown", @@ -1958,7 +1910,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -1984,7 +1936,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "rustix 0.38.32", "windows-sys 0.48.0", ] @@ -2070,9 +2022,9 @@ dependencies = [ [[package]] name = "jsonschema" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0afd06142c9bcb03f4a8787c77897a87b6be9c4918f1946c33caa714c27578" +checksum = 
"f5f037c58cadb17e8591b620b523cc6a7ab2b91b6ce3121f8eb4171f8d80115c" dependencies = [ "ahash", "anyhow", @@ -2125,18 +2077,18 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" @@ -2166,12 +2118,6 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" -[[package]] -name = "litemap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" - [[package]] name = "lock_api" version = "0.4.10" @@ -2313,6 +2259,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + [[package]] name = "mio-serial" version = "5.0.5" @@ -2320,7 +2278,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20a4c60ca5c9c0e114b3bd66ff4aa5f9b2b175442be51ca6c4365d687a97a2ac" dependencies = [ "log", - "mio", + "mio 0.8.11", "nix 0.26.4", "serialport", "winapi", @@ -2391,6 +2349,18 @@ dependencies = [ "memoffset 0.9.0", ] +[[package]] +name = 
"nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.5.0", + "cfg-if 1.0.0", + "cfg_aliases 0.2.1", + "libc", +] + [[package]] name = "no-std-net" version = "0.6.0" @@ -2514,9 +2484,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -2528,7 +2498,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] @@ -2620,9 +2590,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "4.1.1" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "536900a8093134cf9ccf00a27deb3532421099e958d9dd431135d0c7543ca1e8" +checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6" dependencies = [ "num-traits", ] @@ -2695,9 +2665,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem" @@ -2771,9 +2741,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = 
"b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap", @@ -2859,6 +2829,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand 2.0.2", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -2962,6 +2943,21 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "polling" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" +dependencies = [ + "cfg-if 1.0.0", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite 0.2.13", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "polyval" version = "0.4.5" @@ -3002,9 +2998,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -3021,12 +3017,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" dependencies = [ "bytes", - "prost-derive 0.12.6", + "prost-derive 0.13.1", ] [[package]] @@ -3044,12 +3040,12 @@ dependencies = [ [[package]] name = 
"prost-derive" -version = "0.12.6" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.52", @@ -3066,25 +3062,26 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.6" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" dependencies = [ - "prost 0.12.6", + "prost 0.13.1", ] [[package]] name = "quinn" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" dependencies = [ "bytes", "pin-project-lite 0.2.13", "quinn-proto", "quinn-udp", - "rustc-hash", + "rustc-hash 2.0.0", "rustls", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -3099,7 +3096,7 @@ dependencies = [ "bytes", "rand 0.8.5", "ring", - "rustc-hash", + "rustc-hash 1.1.0", "rustls", "rustls-platform-verifier", "slab", @@ -3110,22 +3107,22 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9096629c45860fc7fb143e125eb826b5e721e10be3263160c7d60ca832cf8c46" +checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" dependencies = [ "libc", "once_cell", - "socket2 0.5.6", + "socket2 0.5.7", "tracing", "windows-sys 0.52.0", ] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -3267,9 +3264,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -3358,7 +3355,7 @@ dependencies = [ "cc", "getrandom 0.2.10", "libc", - "spin 0.9.8", + "spin", "untrusted", "windows-sys 0.48.0", ] @@ -3425,6 +3422,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.2.3" @@ -3436,9 +3439,9 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver 1.0.18", ] @@ -3481,9 +3484,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "log", "once_cell", @@ -3501,7 +3504,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "schannel", "security-framework", @@ -3518,19 +3521,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-platform-verifier" @@ -3561,9 +3564,9 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "ring", "rustls-pki-types", @@ -3602,9 +3605,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.13" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763f8cd0d4c71ed8389c90cb8100cba87e763bd01a8e614d4f0af97bcd50a161" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" dependencies = [ "dyn-clone", "either", @@ -3615,14 +3618,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.13" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -3688,9 +3691,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] @@ -3720,9 +3723,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", @@ -3731,13 +3734,13 @@ dependencies = [ [[package]] name = "serde_derive_internals" -version = "0.26.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -3751,11 +3754,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -3785,9 +3789,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.25" +version = 
"0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ "indexmap", "itoa", @@ -3987,20 +3991,14 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -4054,12 +4052,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "standback" version = "0.2.17" @@ -4082,7 +4074,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" dependencies = [ "bitflags 1.3.2", - "cfg_aliases", + "cfg_aliases 0.1.1", "libc", "parking_lot 0.11.2", "parking_lot_core 0.8.6", @@ -4096,7 +4088,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ - "cfg_aliases", + "cfg_aliases 0.1.1", "memchr", "proc-macro2", "quote", @@ -4158,7 +4150,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af91f480ee899ab2d9f8435bfdfc14d08a5754bd9d3fef1f1a1c23336aad6c8b" dependencies = [ - 
"async-channel", + "async-channel 1.9.0", "cfg-if 1.0.0", "futures-core", "pin-project-lite 0.2.13", @@ -4166,9 +4158,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" @@ -4452,16 +4444,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -4498,26 +4480,25 @@ dependencies = [ [[package]] name = "tokio" -version = "1.36.0" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", "libc", - "mio", - "num_cpus", + "mio 1.0.2", "pin-project-lite 0.2.13", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", @@ -4583,9 +4564,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = 
"9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -4594,7 +4575,6 @@ dependencies = [ "hashbrown", "pin-project-lite 0.2.13", "tokio", - "tracing", ] [[package]] @@ -4798,15 +4778,30 @@ dependencies = [ "log", "rand 0.8.5", "serde", - "spin 0.9.8", + "spin", ] +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-xid" version = "0.2.4" @@ -4835,9 +4830,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" @@ -4864,9 +4859,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -4886,18 +4881,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "utf8parse" version = "0.2.1" @@ -4906,9 +4889,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.4.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom 0.2.10", ] @@ -5124,9 +5107,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.0" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" dependencies = [ "rustls-pki-types", ] @@ -5374,18 +5357,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "x509-parser" version = "0.16.0" @@ -5412,30 +5383,6 @@ dependencies = [ "time 0.3.36", ] -[[package]] -name = "yoke" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" -dependencies = [ - "serde", - 
"stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", - "synstructure", -] - [[package]] name = "z-serial" version = "0.2.3" @@ -5471,13 +5418,13 @@ dependencies = [ "phf", "rand 0.8.5", "regex", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", "serde-pickle", "serde_cbor", "serde_json", "serde_yaml", - "socket2 0.5.6", + "socket2 0.5.7", "stop-token", "tokio", "tokio-util", @@ -5588,7 +5535,7 @@ dependencies = [ name = "zenoh-crypto" version = "1.0.0-dev" dependencies = [ - "aes 0.8.3", + "aes 0.8.4", "hmac 0.12.1", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5605,10 +5552,10 @@ dependencies = [ "futures", "git-version", "json5", - "prost 0.12.6", - "prost-types 0.12.6", + "prost 0.13.1", + "prost-types 0.13.1", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde_json", "serde_yaml", "tokio", @@ -5724,7 +5671,7 @@ dependencies = [ "futures", "quinn", "rustls", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "rustls-webpki", "secrecy", @@ -5771,7 +5718,7 @@ name = "zenoh-link-tcp" version = "1.0.0-dev" dependencies = [ "async-trait", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tokio-util", "tracing", @@ -5792,11 +5739,11 @@ dependencies = [ "base64 0.22.1", "futures", "rustls", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "rustls-webpki", "secrecy", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tokio-rustls", "tokio-util", @@ -5819,7 +5766,7 @@ name = "zenoh-link-udp" version = "1.0.0-dev" dependencies = [ "async-trait", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tokio-util", "tracing", @@ -5841,7 +5788,7 @@ dependencies = [ "advisory-lock", "async-trait", "filepath", - "nix 0.27.1", + "nix 0.29.0", "rand 0.8.5", 
"tokio", "tokio-util", @@ -5863,7 +5810,7 @@ version = "1.0.0-dev" dependencies = [ "async-trait", "futures", - "nix 0.27.1", + "nix 0.29.0", "tokio", "tokio-util", "tracing", @@ -5955,7 +5902,7 @@ dependencies = [ "http-types", "jsonschema", "lazy_static", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "schemars", "serde", "serde_json", @@ -5981,7 +5928,7 @@ dependencies = [ "jsonschema", "lazy_static", "libloading", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "schemars", "serde", "serde_json", @@ -6182,7 +6129,7 @@ dependencies = [ "json5", "lazy_static", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "tokio", "tracing", "tracing-loki", @@ -6212,51 +6159,8 @@ dependencies = [ "syn 2.0.52", ] -[[package]] -name = "zerofrom" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", - "synstructure", -] - [[package]] name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" - -[[package]] -name = "zerovec" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] diff --git 
a/Cargo.toml b/Cargo.toml index e78b692612..484baaaef0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,117 +75,117 @@ description = "Zenoh: The Zero Overhead Pub/Sub/Query Protocol." # DEFAULT-FEATURES NOTE: Be careful with default-features and additivity! # (https://github.com/rust-lang/cargo/issues/11329) [workspace.dependencies] -aes = "0.8.2" -ahash = "0.8.7" -anyhow = { version = "1.0.69", default-features = false } # Default features are disabled due to usage in no_std crates -async-executor = "1.5.0" -async-global-executor = "2.3.1" -async-io = "2.3.3" -async-trait = "0.1.60" +aes = "0.8.4" +ahash = "0.8.11" +anyhow = { version = "1.0.86", default-features = false } # Default features are disabled due to usage in no_std crates +async-executor = "1.13.0" +async-global-executor = "2.4.1" +async-io = "2.3.4" +async-trait = "0.1.81" base64 = "0.22.1" bincode = "1.3.3" -bytes = "1.6.1" -clap = { version = "4.4.11", features = ["derive"] } -console-subscriber = "0.3.0" -const_format = "0.2.30" -crc = "3.0.1" +bytes = "1.7.1" +clap = { version = "4.5.16", features = ["derive"] } +console-subscriber = "0.4.0" +const_format = "0.2.32" +crc = "3.2.1" criterion = "0.5" -crossbeam-utils = "0.8.2" -derive_more = "0.99.17" -derive-new = "0.6.0" +crossbeam-utils = "0.8.20" +derive_more = { version = "1.0.0", features = ["as_ref"] } +derive-new = "0.7.0" tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } tracing-loki = "0.2" event-listener = "5.3.1" flume = "0.11" -form_urlencoded = "1.1.0" -futures = "0.3.25" -futures-util = { version = "0.3.25", default-features = false } # Default features are disabled due to some crates' requirements -git-version = "0.3.5" +form_urlencoded = "1.2.1" +futures = "0.3.30" +futures-util = { version = "0.3.30", default-features = false } # Default features are disabled due to some crates' requirements +git-version = "0.3.9" hashbrown = "0.14" hex = { version = "0.4.3", default-features = false } # Default features 
are disabled due to usage in no_std crates hmac = { version = "0.12.1", features = ["std"] } -home = "0.5.4" +home = "0.5.9" http-types = "2.12.0" humantime = "2.1.0" itertools = "0.13.0" json5 = "0.4.1" -jsonschema = { version = "0.18.0", default-features = false } +jsonschema = { version = "0.18.1", default-features = false } keyed-set = "1.0.0" -lazy_static = "1.4.0" -libc = "0.2.139" +lazy_static = "1.5.0" +libc = "0.2.158" libloading = "0.8" tracing = "0.1" lockfree = "0.5" lz4_flex = "0.11" -nix = { version = "0.27.0", features = ["fs"] } +nix = { version = "0.29.0", features = ["fs"] } num_cpus = "1.16.0" -num-traits = { version = "0.2.17", default-features = false } +num-traits = { version = "0.2.19", default-features = false } once_cell = "1.19.0" -ordered-float = "4.1.1" +ordered-float = "4.2.2" panic-message = "0.3.0" -paste = "1.0.12" -petgraph = "0.6.3" +paste = "1.0.15" +petgraph = "0.6.5" phf = { version = "0.11.2", features = ["macros"] } pnet = "0.35.0" pnet_datalink = "0.35.0" -proc-macro2 = "1.0.51" -quinn = "0.11.1" -quote = "1.0.23" +proc-macro2 = "1.0.86" +quinn = "0.11.3" +quote = "1.0.37" rand = { version = "0.8.5", default-features = false } # Default features are disabled due to usage in no_std crates rand_chacha = "0.3.1" rcgen = "0.13.1" -regex = "1.7.1" +regex = "1.10.6" ron = "0.8.1" ringbuffer-spsc = "0.1.9" rsa = "0.9" -rustc_version = "0.4.0" -rustls = { version = "0.23.9", default-features = false, features = [ +rustc_version = "0.4.1" +rustls = { version = "0.23.12", default-features = false, features = [ "logging", "tls12", "ring", ] } -rustls-native-certs = "0.7.0" -rustls-pemfile = "2.0.0" -rustls-webpki = "0.102.0" -rustls-pki-types = "1.1.0" -schemars = { version = "0.8.12", features = ["either"] } +rustls-native-certs = "0.7.3" +rustls-pemfile = "2.1.3" +rustls-webpki = "0.102.7" +rustls-pki-types = "1.8.0" +schemars = { version = "0.8.21", features = ["either"] } secrecy = { version = "0.8.0", features = ["serde", "alloc"] 
} -serde = { version = "1.0.154", default-features = false, features = [ +serde = { version = "1.0.209", default-features = false, features = [ "derive", ] } # Default features are disabled due to usage in no_std crates serde_cbor = "0.11.2" -serde_json = "1.0.114" +serde_json = "1.0.127" serde-pickle = "1.1.1" -serde_yaml = "0.9.19" +serde_yaml = "0.9.34" static_init = "1.0.3" stabby = "36.1.1" -sha3 = "0.10.6" +sha3 = "0.10.8" shared_memory = "0.12.4" -shellexpand = "3.0.0" -socket2 = { version = "0.5.1", features = ["all"] } +shellexpand = "3.1.0" +socket2 = { version = "0.5.7", features = ["all"] } stop-token = "0.7.0" syn = "2.0" tide = "0.16.0" -token-cell = { version = "1.4.2", default-features = false } -tokio = { version = "1.35.1", default-features = false } # Default features are disabled due to some crates' requirements -tokio-util = "0.7.10" +token-cell = { version = "1.5.0", default-features = false } +tokio = { version = "1.39.3", default-features = false } # Default features are disabled due to some crates' requirements +tokio-util = "0.7.11" tokio-tungstenite = "0.23.1" tokio-rustls = { version = "0.26.0", default-features = false } # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) thread-priority = "1.1.0" -typenum = "1.16.0" +typenum = "1.17.0" uhlc = { version = "0.8.0", default-features = false } # Default features are disabled due to usage in no_std crates unwrap-infallible = "0.1.5" unzip-n = "0.1.2" -url = "2.3.1" -urlencoding = "2.1.2" -uuid = { version = "1.3.0", default-features = false, features = [ +url = "2.5.2" +urlencoding = "2.1.3" +uuid = { version = "1.10.0", default-features = false, features = [ "v4", ] } # Default features are disabled due to usage in no_std crates validated_struct = "2.1.0" vec_map = "0.8.2" -webpki-roots = "0.26.0" -winapi = { version = "0.3.9", features = ["iphlpapi"] } +webpki-roots = "0.26.3" +winapi = { version = "0.3.9", features 
= ["iphlpapi", "winerror"] } x509-parser = "0.16.0" z-serial = "0.2.3" either = "1.13.0" diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e8cda2ae27..c2fb23aca3 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -45,8 +45,8 @@ zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -prost = "0.12.6" -prost-types = "0.12.6" +prost = "0.13.1" +prost-types = "0.13.1" [dev-dependencies] rand = { workspace = true, features = ["default"] } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index dfad4d9833..a07267416d 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -328,6 +328,8 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { })?; // We try to acquire the lock + // @TODO: flock is deprecated and upgrading to new Flock will require some refactoring of this module + #[allow(deprecated)] nix::fcntl::flock(lock_fd, nix::fcntl::FlockArg::LockExclusiveNonblock).map_err(|e| { let _ = nix::unistd::close(lock_fd); let e = zerror!( @@ -427,6 +429,8 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { listener.handle.await??; //Release the lock + // @TODO: flock is deprecated and upgrading to new Flock will require some refactoring of this module + #[allow(deprecated)] let _ = nix::fcntl::flock(listener.lock_fd, nix::fcntl::FlockArg::UnlockNonblock); let _ = nix::unistd::close(listener.lock_fd); let _ = remove_file(path.clone()); From 926208cd2bd9a7ef59e6151ff17a99ce6b78edbd Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 29 Aug 2024 16:24:00 +0200 Subject: [PATCH 598/598] Update DEFAULT_CONFIG.json5 --- DEFAULT_CONFIG.json5 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 
0bca1c92c1..e7672c6057 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -89,10 +89,10 @@ }, /// Configure the scouting mechanisms and their behaviours scouting: { - /// In client mode, the period dedicated to scouting for a router before failing + /// In client mode, the period in milliseconds dedicated to scouting for a router before failing. timeout: 3000, - /// In peer mode, the period dedicated to scouting remote peers before attempting other operations - delay: 200, + /// In peer mode, the maximum period in milliseconds dedicated to scouting remote peers before attempting other operations. + delay: 500, /// The multicast scouting configuration. multicast: { /// Whether multicast scouting is enabled or not