diff --git a/Cargo.toml b/Cargo.toml index 04e824d33..ecac36c06 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,3 +23,5 @@ tls_codec = { version = "0.4.2-pre.1", features = [ "serde", "mls", ], git = "https://github.com/rustcrypto/formats" } +maybe-async = "0.2.10" +async-trait = "0.1.80" diff --git a/openmls/Cargo.toml b/openmls/Cargo.toml index 0137d32be..594902c92 100644 --- a/openmls/Cargo.toml +++ b/openmls/Cargo.toml @@ -37,6 +37,9 @@ openmls_memory_storage = { path = "../memory_storage", features = [ openmls_test = { path = "../openmls_test", optional = true } openmls_libcrux_crypto = { path = "../libcrux_crypto", optional = true } once_cell = { version = "1.19.0", optional = true } +maybe-async = {workspace = true} +async-trait = { workspace = true, optional = true } +futures = { version = "0.3.30", optional = true } [features] default = ["backtrace"] @@ -62,6 +65,7 @@ js = [ "dep:getrandom", "dep:fluvio-wasm-timer", ] # enable js randomness source for provider +async = ["dep:async-trait", "openmls_traits/async", "dep:futures"] [dev-dependencies] backtrace = "0.3" diff --git a/openmls/src/group/core_group/mod.rs b/openmls/src/group/core_group/mod.rs index 28d4c5d3a..9f76897c9 100644 --- a/openmls/src/group/core_group/mod.rs +++ b/openmls/src/group/core_group/mod.rs @@ -187,6 +187,8 @@ pub(crate) struct CoreGroupBuilder { max_past_epochs: usize, } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl CoreGroupBuilder { /// Create a new [`CoreGroupBuilder`]. pub(crate) fn new( @@ -266,7 +268,7 @@ impl CoreGroupBuilder { /// /// This function performs cryptographic operations and there requires an /// [`OpenMlsProvider`]. 
- pub(crate) fn build( + pub(crate) async fn build( self, provider: &Provider, signer: &impl Signer, @@ -302,7 +304,7 @@ impl CoreGroupBuilder { // Prepare the PskSecret let psk_secret = { - let psks = load_psks(provider.storage(), &resumption_psk_store, &self.psk_ids)?; + let psks = load_psks(provider.storage(), &resumption_psk_store, &self.psk_ids).await?; PskSecret::new(provider.crypto(), ciphersuite, psks)? }; @@ -347,17 +349,21 @@ impl CoreGroupBuilder { // Store the group state group .store(provider.storage()) + .await .map_err(CoreGroupBuildError::StorageError)?; // Store the private key of the own leaf in the key store as an epoch keypair. group .store_epoch_keypairs(provider.storage(), &[leaf_keypair]) + .await .map_err(CoreGroupBuildError::StorageError)?; Ok(group) } } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl CoreGroup { /// Get a builder for [`CoreGroup`]. pub(crate) fn builder( @@ -488,7 +494,7 @@ impl CoreGroup { } // Create application message - pub(crate) fn create_application_message( + pub(crate) async fn create_application_message( &mut self, aad: &[u8], msg: &[u8], @@ -503,11 +509,11 @@ impl CoreGroup { self.context(), signer, )?; - self.encrypt(public_message, padding_size, provider) + self.encrypt(public_message, padding_size, provider).await } // Encrypt an PublicMessage into an PrivateMessage - pub(crate) fn encrypt( + pub(crate) async fn encrypt( &mut self, public_message: AuthenticatedContent, padding_size: usize, @@ -524,6 +530,7 @@ impl CoreGroup { provider .storage() .write_message_secrets(self.group_id(), &self.message_secrets_store) + .await .map_err(MessageEncryptionError::StorageError)?; Ok(msg) @@ -726,33 +733,43 @@ impl CoreGroup { /// Stores the [`CoreGroup`]. Called from methods creating a new group and mutating an /// existing group, both inside [`CoreGroup`] and in [`MlsGroup`]. 
- pub(super) fn store( + pub(super) async fn store( &self, storage: &Storage, ) -> Result<(), Storage::Error> { let group_id = self.group_id(); - self.public_group.store(storage)?; - storage.write_own_leaf_index(group_id, &self.own_leaf_index())?; - storage.write_group_epoch_secrets(group_id, &self.group_epoch_secrets)?; - storage.set_use_ratchet_tree_extension(group_id, self.use_ratchet_tree_extension)?; - storage.write_message_secrets(group_id, &self.message_secrets_store)?; - storage.write_resumption_psk_store(group_id, &self.resumption_psk_store)?; + self.public_group.store(storage).await?; + storage + .write_own_leaf_index(group_id, &self.own_leaf_index()) + .await?; + storage + .write_group_epoch_secrets(group_id, &self.group_epoch_secrets) + .await?; + storage + .set_use_ratchet_tree_extension(group_id, self.use_ratchet_tree_extension) + .await?; + storage + .write_message_secrets(group_id, &self.message_secrets_store) + .await?; + storage + .write_resumption_psk_store(group_id, &self.resumption_psk_store) + .await?; Ok(()) } /// Loads a [`CoreGroup`]. Called in [`MlsGroup::load`]. 
- pub(super) fn load( + pub(super) async fn load( storage: &Storage, group_id: &GroupId, ) -> Result, Storage::Error> { - let public_group = PublicGroup::load(storage, group_id)?; - let group_epoch_secrets = storage.group_epoch_secrets(group_id)?; - let own_leaf_index = storage.own_leaf_index(group_id)?; - let use_ratchet_tree_extension = storage.use_ratchet_tree_extension(group_id)?; - let message_secrets_store = storage.message_secrets(group_id)?; - let resumption_psk_store = storage.resumption_psk_store(group_id)?; + let public_group = PublicGroup::load(storage, group_id).await?; + let group_epoch_secrets = storage.group_epoch_secrets(group_id).await?; + let own_leaf_index = storage.own_leaf_index(group_id).await?; + let use_ratchet_tree_extension = storage.use_ratchet_tree_extension(group_id).await?; + let message_secrets_store = storage.message_secrets(group_id).await?; + let resumption_psk_store = storage.resumption_psk_store(group_id).await?; let build = || -> Option { Some(Self { @@ -768,16 +785,20 @@ impl CoreGroup { Ok(build()) } - pub(super) fn delete( + pub(super) async fn delete( &self, storage: &Storage, ) -> Result<(), Storage::Error> { - self.public_group.delete(storage)?; - storage.delete_own_leaf_index(self.group_id())?; - storage.delete_group_epoch_secrets(self.group_id())?; - storage.delete_use_ratchet_tree_extension(self.group_id())?; - storage.delete_message_secrets(self.group_id())?; - storage.delete_all_resumption_psk_secrets(self.group_id())?; + self.public_group.delete(storage).await?; + storage.delete_own_leaf_index(self.group_id()).await?; + storage.delete_group_epoch_secrets(self.group_id()).await?; + storage + .delete_use_ratchet_tree_extension(self.group_id()) + .await?; + storage.delete_message_secrets(self.group_id()).await?; + storage + .delete_all_resumption_psk_secrets(self.group_id()) + .await?; Ok(()) } @@ -786,17 +807,19 @@ impl CoreGroup { /// indexed by this group's [`GroupId`] and [`GroupEpoch`]. 
/// /// Returns an error if access to the key store fails. - pub(super) fn store_epoch_keypairs( + pub(super) async fn store_epoch_keypairs( &self, store: &Storage, keypair_references: &[EncryptionKeyPair], ) -> Result<(), Storage::Error> { - store.write_encryption_epoch_key_pairs( - self.group_id(), - &self.context().epoch(), - self.own_leaf_index().u32(), - keypair_references, - ) + store + .write_encryption_epoch_key_pairs( + self.group_id(), + &self.context().epoch(), + self.own_leaf_index().u32(), + keypair_references, + ) + .await } /// Read the [`EncryptionKeyPair`]s of this group and its current @@ -804,7 +827,7 @@ impl CoreGroup { /// /// Returns an empty vector if access to the store fails or it can't find /// any keys. - pub(super) fn read_epoch_keypairs( + pub(super) async fn read_epoch_keypairs( &self, store: &Storage, ) -> Vec { @@ -814,6 +837,7 @@ impl CoreGroup { &self.context().epoch(), self.own_leaf_index().u32(), ) + .await .unwrap_or_default() } @@ -821,20 +845,22 @@ impl CoreGroup { /// the `provider`'s key store. /// /// Returns an error if access to the key store fails. - pub(super) fn delete_previous_epoch_keypairs( + pub(super) async fn delete_previous_epoch_keypairs( &self, store: &Storage, ) -> Result<(), Storage::Error> { - store.delete_encryption_epoch_key_pairs( - self.group_id(), - &GroupEpoch::from(self.context().epoch().as_u64() - 1), - self.own_leaf_index().u32(), - ) + store + .delete_encryption_epoch_key_pairs( + self.group_id(), + &GroupEpoch::from(self.context().epoch().as_u64() - 1), + self.own_leaf_index().u32(), + ) + .await } - pub(crate) fn create_commit( + pub(crate) async fn create_commit( &self, - mut params: CreateCommitParams, + mut params: CreateCommitParams<'_>, provider: &Provider, signer: &impl Signer, ) -> Result> { @@ -986,7 +1012,8 @@ impl CoreGroup { provider.storage(), &self.resumption_psk_store, &apply_proposals_values.presharedkeys, - )?; + ) + .await?; PskSecret::new(provider.crypto(), ciphersuite, psks)? 
}; diff --git a/openmls/src/group/core_group/new_from_external_init.rs b/openmls/src/group/core_group/new_from_external_init.rs index 29a708405..05a710f0e 100644 --- a/openmls/src/group/core_group/new_from_external_init.rs +++ b/openmls/src/group/core_group/new_from_external_init.rs @@ -13,6 +13,8 @@ use crate::group::core_group::*; pub(crate) type ExternalCommitResult = (CoreGroup, CreateCommitResult); +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl CoreGroup { /// Join a group without the help of an internal member. This function /// requires a [GroupInfo], as well as the corresponding public tree @@ -25,10 +27,10 @@ impl CoreGroup { /// /// Note: If there is a group member in the group with the same identity as us, /// this will create a remove proposal. - pub(crate) fn join_by_external_commit( + pub(crate) async fn join_by_external_commit( provider: &Provider, signer: &impl Signer, - mut params: CreateCommitParams, + mut params: CreateCommitParams<'_>, ratchet_tree: Option, verifiable_group_info: VerifiableGroupInfo, ) -> Result> { @@ -53,7 +55,8 @@ impl CoreGroup { verifiable_group_info, // Existing proposals are discarded when joining by external commit. ProposalStore::new(), - )?; + ) + .await?; let group_context = public_group.group_context(); // Obtain external_pub from GroupInfo extensions. @@ -128,7 +131,7 @@ impl CoreGroup { .build(); // Immediately create the commit to add ourselves to the group. 
- let create_commit_result = group.create_commit(params, provider, signer); + let create_commit_result = group.create_commit(params, provider, signer).await; debug_assert!( create_commit_result.is_ok(), "Error creating commit {create_commit_result:?}" @@ -136,6 +139,7 @@ impl CoreGroup { group .store(provider.storage()) + .await .map_err(ExternalCommitError::StorageError)?; Ok(( diff --git a/openmls/src/group/core_group/new_from_welcome.rs b/openmls/src/group/core_group/new_from_welcome.rs index 3a50c1758..8e3689c81 100644 --- a/openmls/src/group/core_group/new_from_welcome.rs +++ b/openmls/src/group/core_group/new_from_welcome.rs @@ -8,13 +8,15 @@ use crate::{ treesync::errors::{DerivePathError, PublicTreeError}, }; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl StagedCoreWelcome { /// Create a staged join from a welcome message. The purpose of this type is to be able to /// extract information, such as the identify of who created the welcome, before joining the /// group. /// Note: calling this function will consume the key material for decrypting the [`Welcome`] /// message, even if the caller does not turn the [`StagedCoreWelcome`] into a [`CoreGroup`]. - pub fn new_from_welcome( + pub async fn new_from_welcome( welcome: Welcome, ratchet_tree: Option, key_package_bundle: KeyPackageBundle, @@ -27,7 +29,8 @@ impl StagedCoreWelcome { &key_package_bundle, provider, &resumption_psk_store, - )?; + ) + .await?; build_staged_welcome( verifiable_group_info, @@ -39,6 +42,7 @@ impl StagedCoreWelcome { resumption_psk_store, group_secrets, ) + .await } /// Returns the [`LeafNodeIndex`] of the group member that authored the [`Welcome`] message. @@ -57,7 +61,7 @@ impl StagedCoreWelcome { } /// Consumes the [`StagedCoreWelcome`] and returns the respective [`CoreGroup`]. 
- pub fn into_core_group( + pub async fn into_core_group( self, provider: &Provider, ) -> Result> { @@ -82,17 +86,21 @@ impl StagedCoreWelcome { group .store(provider.storage()) + .await .map_err(WelcomeError::StorageError)?; group .store_epoch_keypairs(provider.storage(), group_keypairs.as_slice()) + .await .map_err(WelcomeError::StorageError)?; Ok(group) } } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] #[allow(clippy::too_many_arguments)] -pub(in crate::group) fn build_staged_welcome( +pub(in crate::group) async fn build_staged_welcome( verifiable_group_info: VerifiableGroupInfo, ratchet_tree: Option, provider: &Provider, @@ -124,7 +132,8 @@ pub(in crate::group) fn build_staged_welcome( ratchet_tree, verifiable_group_info.clone(), ProposalStore::new(), - )?; + ) + .await?; // Find our own leaf in the tree. let own_leaf_index = public_group @@ -234,7 +243,9 @@ pub(in crate::group) fn build_staged_welcome( } /// Process a Welcome message up to the point where the ratchet tree is required. -pub(in crate::group) fn process_welcome( +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] +pub(in crate::group) async fn process_welcome( welcome: Welcome, key_package_bundle: &KeyPackageBundle, provider: &Provider, @@ -271,7 +282,8 @@ pub(in crate::group) fn process_welcome( provider.storage(), resumption_psk_store, &group_secrets.psks, - )?; + ) + .await?; PskSecret::new(provider.crypto(), ciphersuite, psks)? 
}; diff --git a/openmls/src/group/core_group/process.rs b/openmls/src/group/core_group/process.rs index e203643af..e217336e5 100644 --- a/openmls/src/group/core_group/process.rs +++ b/openmls/src/group/core_group/process.rs @@ -1,4 +1,6 @@ use core_group::proposals::QueuedProposal; +#[cfg(feature = "async")] +use futures::{stream, StreamExt}; use crate::{ framing::mls_content::FramedContentBody, @@ -10,6 +12,8 @@ use crate::{ use super::{proposals::ProposalStore, *}; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl CoreGroup { /// This processing function does most of the semantic verifications. /// It returns a [ProcessedMessage] enum. @@ -39,7 +43,7 @@ impl CoreGroup { /// - ValSem242 /// - ValSem244 /// - ValSem246 (as part of ValSem010) - pub(crate) fn process_unverified_message( + pub(crate) async fn process_unverified_message( &self, provider: &Provider, unverified_message: UnverifiedMessage, @@ -78,13 +82,15 @@ impl CoreGroup { } } FramedContentBody::Commit(_) => { - let staged_commit = self.stage_commit( - &content, - proposal_store, - old_epoch_keypairs, - leaf_node_keypairs, - provider, - )?; + let staged_commit = self + .stage_commit( + &content, + proposal_store, + old_epoch_keypairs, + leaf_node_keypairs, + provider, + ) + .await?; ProcessedMessageContent::StagedCommitMessage(Box::new(staged_commit)) } }; @@ -169,7 +175,7 @@ impl CoreGroup { /// - ValSem244 /// - ValSem245 /// - ValSem246 (as part of ValSem010) - pub(crate) fn process_message( + pub(crate) async fn process_message( &mut self, provider: &Provider, message: impl Into, @@ -195,7 +201,8 @@ impl CoreGroup { // If this is a commit, we need to load the private key material we need for decryption. let (old_epoch_keypairs, leaf_node_keypairs) = if let ContentType::Commit = unverified_message.content_type() { - self.read_decryption_keypairs(provider, own_leaf_nodes)? 
+ self.read_decryption_keypairs(provider, own_leaf_nodes) + .await? } else { (vec![], vec![]) }; @@ -207,6 +214,7 @@ impl CoreGroup { old_epoch_keypairs, leaf_node_keypairs, ) + .await } /// Performs framing validation and, if necessary, decrypts the given message. @@ -267,30 +275,56 @@ impl CoreGroup { } /// Helper function to read decryption keypairs. - pub(super) fn read_decryption_keypairs( + pub(super) async fn read_decryption_keypairs( &self, provider: &impl OpenMlsProvider, own_leaf_nodes: &[LeafNode], ) -> Result<(Vec, Vec), StageCommitError> { // All keys from the previous epoch are potential decryption keypairs. - let old_epoch_keypairs = self.read_epoch_keypairs(provider.storage()); + let old_epoch_keypairs = self.read_epoch_keypairs(provider.storage()).await; // If we are processing an update proposal that originally came from // us, the keypair corresponding to the leaf in the update is also a // potential decryption keypair. - let leaf_node_keypairs = own_leaf_nodes + let leaf_node_keypairs = + Self::encryption_key_pairs_from_own_leaf_nodes(provider, own_leaf_nodes).await?; + + Ok((old_epoch_keypairs, leaf_node_keypairs)) + } + + #[cfg(feature = "async")] + async fn encryption_key_pairs_from_own_leaf_nodes( + provider: &impl OpenMlsProvider, + own_leaf_nodes: &[LeafNode], + ) -> Result, StageCommitError> { + stream::iter(own_leaf_nodes) + .then(|leaf_node| async { + EncryptionKeyPair::read(provider, leaf_node.encryption_key()) + .await + .ok_or(StageCommitError::MissingDecryptionKey) + }) + .collect::>() + .await + .into_iter() + .collect() + } + + #[cfg(not(feature = "async"))] + async fn encryption_key_pairs_from_own_leaf_nodes( + provider: &impl OpenMlsProvider, + own_leaf_nodes: &[LeafNode], + ) -> Result, StageCommitError> { + own_leaf_nodes .iter() .map(|leaf_node| { EncryptionKeyPair::read(provider, leaf_node.encryption_key()) .ok_or(StageCommitError::MissingDecryptionKey) }) - .collect::, StageCommitError>>()?; - - Ok((old_epoch_keypairs, 
leaf_node_keypairs)) + .collect() } /// Merge a [StagedCommit] into the group after inspection - pub(crate) fn merge_staged_commit( + pub(crate) async fn merge_staged_commit( &mut self, provider: &Provider, staged_commit: StagedCommit, @@ -302,7 +336,7 @@ impl CoreGroup { let leaves = self.public_group().members().collect(); // Merge the staged commit into the group state and store the secret tree from the // previous epoch in the message secrets store. - if let Some(message_secrets) = self.merge_commit(provider, staged_commit)? { + if let Some(message_secrets) = self.merge_commit(provider, staged_commit).await? { self.message_secrets_store .add(past_epoch, message_secrets, leaves); } diff --git a/openmls/src/group/core_group/proposals.rs b/openmls/src/group/core_group/proposals.rs index f08695864..93b782c16 100644 --- a/openmls/src/group/core_group/proposals.rs +++ b/openmls/src/group/core_group/proposals.rs @@ -53,6 +53,7 @@ impl ProposalStore { /// Removes a proposal from the store using its reference. It will return /// None if it wasn't found in the store. 
+ #[cfg(not(feature = "async"))] pub(crate) fn remove(&mut self, proposal_ref: ProposalRef) -> Option<()> { let index = self .queued_proposals diff --git a/openmls/src/group/core_group/staged_commit.rs b/openmls/src/group/core_group/staged_commit.rs index 440c9c0be..c903fdeeb 100644 --- a/openmls/src/group/core_group/staged_commit.rs +++ b/openmls/src/group/core_group/staged_commit.rs @@ -13,8 +13,10 @@ use crate::{ use openmls_traits::storage::StorageProvider as _; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl CoreGroup { - fn derive_epoch_secrets( + async fn derive_epoch_secrets( &self, provider: &impl OpenMlsProvider, apply_proposals_values: ApplyProposalsValues, @@ -65,7 +67,8 @@ impl CoreGroup { provider.storage(), &self.resumption_psk_store, &apply_proposals_values.presharedkeys, - )?; + ) + .await?; PskSecret::new(provider.crypto(), self.ciphersuite(), psks)? }; @@ -121,7 +124,7 @@ impl CoreGroup { /// - ValSem242 /// - ValSem244 Returns an error if the given commit was sent by the owner /// of this group. - pub(crate) fn stage_commit( + pub(crate) async fn stage_commit( &self, mls_content: &AuthenticatedContent, proposal_store: &ProposalStore, @@ -261,7 +264,8 @@ impl CoreGroup { self.group_epoch_secrets(), commit_secret, &serialized_provisional_group_context, - )? + ) + .await? .split_secrets( serialized_provisional_group_context, diff.tree_size(), @@ -314,19 +318,20 @@ impl CoreGroup { /// /// This function should not fail and only returns a [`Result`], because it /// might throw a `LibraryError`. - pub(crate) fn merge_commit( + pub(crate) async fn merge_commit( &mut self, provider: &Provider, staged_commit: StagedCommit, ) -> Result, MergeCommitError> { // Get all keypairs from the old epoch, so we can later store the ones // that are still relevant in the new epoch. 
- let old_epoch_keypairs = self.read_epoch_keypairs(provider.storage()); + let old_epoch_keypairs = self.read_epoch_keypairs(provider.storage()).await; match staged_commit.state { StagedCommitState::PublicState(staged_state) => { self.public_group .merge_diff(staged_state.into_staged_diff()); self.store(provider.storage()) + .await .map_err(MergeCommitError::StorageError)?; Ok(None) } @@ -379,24 +384,30 @@ impl CoreGroup { self.public_group .store(storage) + .await .map_err(MergeCommitError::StorageError)?; storage .write_group_epoch_secrets(group_id, &self.group_epoch_secrets) + .await .map_err(MergeCommitError::StorageError)?; storage .write_message_secrets(group_id, &self.message_secrets_store) + .await .map_err(MergeCommitError::StorageError)?; // Store the relevant keys under the new epoch self.store_epoch_keypairs(storage, epoch_keypairs.as_slice()) + .await .map_err(MergeCommitError::StorageError)?; // Delete the old keys. self.delete_previous_epoch_keypairs(storage) + .await .map_err(MergeCommitError::StorageError)?; if let Some(keypair) = state.new_leaf_keypair_option { keypair .delete(storage) + .await .map_err(MergeCommitError::StorageError)?; } diff --git a/openmls/src/group/mls_group/application.rs b/openmls/src/group/mls_group/application.rs index 52601ff4a..08d445825 100644 --- a/openmls/src/group/mls_group/application.rs +++ b/openmls/src/group/mls_group/application.rs @@ -4,6 +4,8 @@ use crate::storage::OpenMlsProvider; use super::{errors::CreateMessageError, *}; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { // === Application messages === @@ -13,7 +15,7 @@ impl MlsGroup { /// Returns `CreateMessageError::MlsGroupStateError::PendingProposal` if pending proposals /// exist. In that case `.process_pending_proposals()` must be called first /// and incoming messages from the DS must be processed afterwards. 
- pub fn create_message( + pub async fn create_message( &mut self, provider: &Provider, signer: &impl Signer, @@ -39,6 +41,7 @@ impl MlsGroup { provider, signer, ) + .await // We know the application message is wellformed and we have the key material of the current epoch .map_err(|_| LibraryError::custom("Malformed plaintext"))?; diff --git a/openmls/src/group/mls_group/builder.rs b/openmls/src/group/mls_group/builder.rs index 59fe151b0..e505c4ec3 100644 --- a/openmls/src/group/mls_group/builder.rs +++ b/openmls/src/group/mls_group/builder.rs @@ -23,6 +23,8 @@ pub struct MlsGroupBuilder { mls_group_create_config_builder: MlsGroupCreateConfigBuilder, } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroupBuilder { pub(super) fn new() -> Self { Self::default() @@ -35,13 +37,14 @@ impl MlsGroupBuilder { } /// Build a new group as configured by this builder. - pub fn build( + pub async fn build( self, provider: &Provider, signer: &impl Signer, credential_with_key: CredentialWithKey, ) -> Result> { self.build_internal(provider, signer, credential_with_key, None) + .await } /// Build a new group with the given group ID. @@ -49,7 +52,7 @@ impl MlsGroupBuilder { /// If an [`MlsGroupCreateConfig`] is provided, it will be used to configure the /// group. Otherwise, the internal builder is used to build one with the /// parameters set on this builder. 
- pub(super) fn build_internal( + pub(super) async fn build_internal( self, provider: &Provider, signer: &impl Signer, @@ -80,6 +83,7 @@ impl MlsGroupBuilder { .with_max_past_epoch_secrets(mls_group_create_config.join_config.max_past_epochs) .with_lifetime(*mls_group_create_config.lifetime()) .build(provider, signer) + .await .map_err(|e| match e { CoreGroupBuildError::LibraryError(e) => e.into(), // We don't support PSKs yet @@ -114,14 +118,17 @@ impl MlsGroupBuilder { provider .storage() .write_mls_join_config(mls_group.group_id(), &mls_group.mls_group_config) + .await .map_err(NewGroupError::StorageError)?; provider .storage() .write_group_state(mls_group.group_id(), &mls_group.group_state) + .await .map_err(NewGroupError::StorageError)?; mls_group .group .store(provider.storage()) + .await .map_err(NewGroupError::StorageError)?; Ok(mls_group) diff --git a/openmls/src/group/mls_group/creation.rs b/openmls/src/group/mls_group/creation.rs index a61a9eae7..1ccc97131 100644 --- a/openmls/src/group/mls_group/creation.rs +++ b/openmls/src/group/mls_group/creation.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "async")] +use futures::stream::{self, StreamExt}; use openmls_traits::{signatures::Signer, storage::StorageProvider as StorageProviderTrait}; use super::{builder::MlsGroupBuilder, *}; @@ -16,6 +18,8 @@ use crate::{ treesync::RatchetTreeIn, }; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { // === Group creation === @@ -30,23 +34,25 @@ impl MlsGroup { /// /// This function removes the private key corresponding to the /// `key_package` from the key store. 
- pub fn new( + pub async fn new( provider: &Provider, signer: &impl Signer, mls_group_create_config: &MlsGroupCreateConfig, credential_with_key: CredentialWithKey, ) -> Result> { - MlsGroupBuilder::new().build_internal( - provider, - signer, - credential_with_key, - Some(mls_group_create_config.clone()), - ) + MlsGroupBuilder::new() + .build_internal( + provider, + signer, + credential_with_key, + Some(mls_group_create_config.clone()), + ) + .await } /// Creates a new group with a given group ID with the creator as the only /// member. - pub fn new_with_group_id( + pub async fn new_with_group_id( provider: &Provider, signer: &impl Signer, mls_group_create_config: &MlsGroupCreateConfig, @@ -61,6 +67,7 @@ impl MlsGroup { credential_with_key, Some(mls_group_create_config.clone()), ) + .await } /// Join an existing group through an External Commit. @@ -77,7 +84,7 @@ impl MlsGroup { /// /// Note: If there is a group member in the group with the same identity as /// us, this will create a remove proposal. 
- pub fn join_by_external_commit( + pub async fn join_by_external_commit( provider: &Provider, signer: &impl Signer, ratchet_tree: Option, @@ -102,7 +109,8 @@ impl MlsGroup { params, ratchet_tree, verifiable_group_info, - )?; + ) + .await?; group.set_max_past_epochs(mls_group_config.max_past_epochs); let mls_group = MlsGroup { @@ -119,14 +127,17 @@ impl MlsGroup { provider .storage() .write_mls_join_config(mls_group.group_id(), &mls_group.mls_group_config) + .await .map_err(ExternalCommitError::StorageError)?; provider .storage() .write_group_state(mls_group.group_id(), &mls_group.group_state) + .await .map_err(ExternalCommitError::StorageError)?; mls_group .group .store(provider.storage()) + .await .map_err(ExternalCommitError::StorageError)?; let public_message: PublicMessage = create_commit_result.commit.into(); @@ -139,6 +150,7 @@ impl MlsGroup { } } +#[cfg(not(feature = "async"))] fn transpose_err_opt(v: Result, E>) -> Option> { match v { Ok(Some(v)) => Some(Ok(v)), @@ -147,6 +159,8 @@ fn transpose_err_opt(v: Result, E>) -> Option> { } } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl ProcessedWelcome { /// Creates a new processed [`Welcome`] message , which can be /// inspected before creating a [`StagedWelcome`]. @@ -154,13 +168,13 @@ impl ProcessedWelcome { /// This does not require a ratchet tree yet. 
/// /// [`Welcome`]: crate::messages::Welcome - pub fn new_from_welcome( + pub async fn new_from_welcome( provider: &Provider, mls_group_config: &MlsGroupJoinConfig, welcome: Welcome, ) -> Result> { let (resumption_psk_store, key_package_bundle) = - keys_for_welcome(mls_group_config, &welcome, provider)?; + keys_for_welcome(mls_group_config, &welcome, provider).await?; let (ciphersuite, group_secrets, key_schedule, verifiable_group_info) = crate::group::core_group::new_from_welcome::process_welcome( @@ -168,7 +182,8 @@ impl ProcessedWelcome { &key_package_bundle, provider, &resumption_psk_store, - )?; + ) + .await?; Ok(Self { mls_group_config: mls_group_config.clone(), @@ -197,7 +212,7 @@ impl ProcessedWelcome { /// Consume the `ProcessedWelcome` and combine it witht he ratchet tree into /// a `StagedWelcome`. - pub fn into_staged_welcome( + pub async fn into_staged_welcome( self, provider: &Provider, ratchet_tree: Option, @@ -211,7 +226,8 @@ impl ProcessedWelcome { self.ciphersuite, self.resumption_psk_store, self.group_secrets, - )?; + ) + .await?; let staged_welcome = StagedWelcome { mls_group_config: self.mls_group_config, @@ -222,6 +238,8 @@ impl ProcessedWelcome { } } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl StagedWelcome { /// Creates a new staged welcome from a [`Welcome`] message. Returns an error /// ([`WelcomeError::NoMatchingKeyPackage`]) if no [`KeyPackage`] @@ -230,14 +248,14 @@ impl StagedWelcome { /// message, even if the caller does not turn the [`StagedWelcome`] into an [`MlsGroup`]. 
/// /// [`Welcome`]: crate::messages::Welcome - pub fn new_from_welcome( + pub async fn new_from_welcome( provider: &Provider, mls_group_config: &MlsGroupJoinConfig, welcome: Welcome, ratchet_tree: Option, ) -> Result> { let (resumption_psk_store, key_package_bundle) = - keys_for_welcome(mls_group_config, &welcome, provider)?; + keys_for_welcome(mls_group_config, &welcome, provider).await?; let group = StagedCoreWelcome::new_from_welcome( welcome, @@ -245,7 +263,8 @@ impl StagedWelcome { key_package_bundle, provider, resumption_psk_store, - )?; + ) + .await?; let staged_welcome = StagedWelcome { mls_group_config: mls_group_config.clone(), @@ -270,11 +289,11 @@ impl StagedWelcome { } /// Consumes the [`StagedWelcome`] and returns the respective [`MlsGroup`]. - pub fn into_group( + pub async fn into_group( self, provider: &Provider, ) -> Result> { - let mut group = self.group.into_core_group(provider)?; + let mut group = self.group.into_core_group(provider).await?; group.set_max_past_epochs(self.mls_group_config.max_past_epochs); let mls_group = MlsGroup { @@ -289,17 +308,21 @@ impl StagedWelcome { provider .storage() .write_mls_join_config(mls_group.group_id(), &mls_group.mls_group_config) + .await .map_err(WelcomeError::StorageError)?; provider .storage() .write_group_state(mls_group.group_id(), &MlsGroupState::Operational) + .await .map_err(WelcomeError::StorageError)?; Ok(mls_group) } } -fn keys_for_welcome( +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] +async fn keys_for_welcome( mls_group_config: &MlsGroupJoinConfig, welcome: &Welcome, provider: &Provider, @@ -308,7 +331,46 @@ fn keys_for_welcome( WelcomeError<::StorageError>, > { let resumption_psk_store = ResumptionPskStore::new(mls_group_config.number_of_resumption_psks); - let key_package_bundle: KeyPackageBundle = welcome + let key_package_bundle = get_key_package_bundle_for_welcome(welcome, provider) + .await + 
.ok_or(WelcomeError::NoMatchingKeyPackage)?; + if !key_package_bundle.key_package().last_resort() { + provider + .storage() + .delete_key_package(&key_package_bundle.key_package.hash_ref(provider.crypto())?) + .await + .map_err(WelcomeError::StorageError)?; + } else { + log::debug!("Key package has last resort extension, not deleting"); + } + Ok((resumption_psk_store, key_package_bundle)) +} + +#[maybe_async::must_be_async] +#[cfg(feature = "async")] +async fn get_key_package_bundle_for_welcome( + welcome: &Welcome, + provider: &Provider, +) -> Option { + let stream = stream::iter(welcome.secrets()); + let events = stream.filter_map(|egs| async move { + let hash_ref = egs.new_member(); + provider.storage().key_package(&hash_ref).await.ok()? + }); + events + .collect::>() + .await + .first() + .cloned() +} + +#[maybe_async::must_be_sync] +#[cfg(not(feature = "async"))] +async fn get_key_package_bundle_for_welcome( + welcome: &Welcome, + provider: &Provider, +) -> Option { + welcome .secrets() .iter() .find_map(|egs| { @@ -318,17 +380,9 @@ fn keys_for_welcome( provider .storage() .key_package(&hash_ref) + .await .map_err(WelcomeError::StorageError), ) - }) - .ok_or(WelcomeError::NoMatchingKeyPackage)??; - if !key_package_bundle.key_package().last_resort() { - provider - .storage() - .delete_key_package(&key_package_bundle.key_package.hash_ref(provider.crypto())?) - .map_err(WelcomeError::StorageError)?; - } else { - log::debug!("Key package has last resort extension, not deleting"); - } - Ok((resumption_psk_store, key_package_bundle)) + })? 
+ .ok() } diff --git a/openmls/src/group/mls_group/membership.rs b/openmls/src/group/mls_group/membership.rs index 0416b4bfd..bb2b17b30 100644 --- a/openmls/src/group/mls_group/membership.rs +++ b/openmls/src/group/mls_group/membership.rs @@ -14,6 +14,8 @@ use crate::{ storage::OpenMlsProvider, treesync::LeafNode, }; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { /// Adds members to the group. /// @@ -31,7 +33,7 @@ impl MlsGroup { /// [`Welcome`]: crate::messages::Welcome // FIXME: #1217 #[allow(clippy::type_complexity)] - pub fn add_members( + pub async fn add_members( &mut self, provider: &Provider, signer: &impl Signer, @@ -63,7 +65,7 @@ impl MlsGroup { .proposal_store(&self.proposal_store) .inline_proposals(inline_proposals) .build(); - let create_commit_result = self.group.create_commit(params, provider, signer)?; + let create_commit_result = self.group.create_commit(params, provider, signer).await?; let welcome = match create_commit_result.welcome_option { Some(welcome) => welcome, @@ -74,7 +76,9 @@ impl MlsGroup { // Convert PublicMessage messages to MLSMessage and encrypt them if required by // the configuration - let mls_messages = self.content_to_mls_message(create_commit_result.commit, provider)?; + let mls_messages = self + .content_to_mls_message(create_commit_result.commit, provider) + .await?; // Set the current group state to [`MlsGroupState::PendingCommit`], // storing the current [`StagedCommit`] from the commit results @@ -85,6 +89,7 @@ impl MlsGroup { provider .storage() .write_group_state(self.group_id(), &self.group_state) + .await .map_err(AddMembersError::StorageError)?; Ok(( @@ -115,7 +120,7 @@ impl MlsGroup { /// [`Welcome`]: crate::messages::Welcome // FIXME: #1217 #[allow(clippy::type_complexity)] - pub fn remove_members( + pub async fn remove_members( &mut self, provider: &Provider, signer: &impl Signer, @@ -145,11 +150,13 @@ impl MlsGroup { 
.proposal_store(&self.proposal_store) .inline_proposals(inline_proposals) .build(); - let create_commit_result = self.group.create_commit(params, provider, signer)?; + let create_commit_result = self.group.create_commit(params, provider, signer).await?; // Convert PublicMessage messages to MLSMessage and encrypt them if required by // the configuration - let mls_message = self.content_to_mls_message(create_commit_result.commit, provider)?; + let mls_message = self + .content_to_mls_message(create_commit_result.commit, provider) + .await?; // Set the current group state to [`MlsGroupState::PendingCommit`], // storing the current [`StagedCommit`] from the commit results @@ -160,6 +167,7 @@ impl MlsGroup { provider .storage() .write_group_state(self.group_id(), &self.group_state) + .await .map_err(RemoveMembersError::StorageError)?; Ok(( @@ -177,7 +185,7 @@ impl MlsGroup { /// The Remove Proposal is returned as a [`MlsMessageOut`]. /// /// Returns an error if there is a pending commit. - pub fn leave_group( + pub async fn leave_group( &mut self, provider: &Provider, signer: &impl Signer, @@ -197,7 +205,9 @@ impl MlsGroup { remove_proposal.clone(), )?); - Ok(self.content_to_mls_message(remove_proposal, provider)?) + Ok(self + .content_to_mls_message(remove_proposal, provider) + .await?) } /// Returns a list of [`Member`]s in the group. diff --git a/openmls/src/group/mls_group/mod.rs b/openmls/src/group/mls_group/mod.rs index 4c7360f5a..81e1bc44c 100644 --- a/openmls/src/group/mls_group/mod.rs +++ b/openmls/src/group/mls_group/mod.rs @@ -169,6 +169,8 @@ pub struct MlsGroup { group_state: MlsGroupState, } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { // === Configuration === @@ -178,13 +180,15 @@ impl MlsGroup { } /// Sets the configuration. 
- pub fn set_configuration( + pub async fn set_configuration( &mut self, storage: &Storage, mls_group_config: &MlsGroupJoinConfig, ) -> Result<(), Storage::Error> { self.mls_group_config = mls_group_config.clone(); - storage.write_mls_join_config(self.group_id(), mls_group_config) + storage + .write_mls_join_config(self.group_id(), mls_group_config) + .await } /// Returns the AAD used in the framing. @@ -193,13 +197,13 @@ impl MlsGroup { } /// Sets the AAD used in the framing. - pub fn set_aad( + pub async fn set_aad( &mut self, storage: &Storage, aad: &[u8], ) -> Result<(), Storage::Error> { self.aad = aad.to_vec(); - storage.write_aad(self.group_id(), aad) + storage.write_aad(self.group_id(), aad).await } // === Advanced functions === @@ -279,7 +283,7 @@ impl MlsGroup { /// the pending commit will not be used in the group. In particular, if a /// pending commit is later accepted by the group, this client will lack the /// key material to encrypt or decrypt group messages. - pub fn clear_pending_commit( + pub async fn clear_pending_commit( &mut self, storage: &Storage, ) -> Result<(), Storage::Error> { @@ -287,7 +291,9 @@ impl MlsGroup { MlsGroupState::PendingCommit(ref pending_commit_state) => { if let PendingCommitState::Member(_) = **pending_commit_state { self.group_state = MlsGroupState::Operational; - storage.write_group_state(self.group_id(), &self.group_state) + storage + .write_group_state(self.group_id(), &self.group_state) + .await } else { Ok(()) } @@ -302,7 +308,7 @@ impl MlsGroup { /// a Commit message that references those proposals. Only use this /// function as a last resort, e.g. when a call to /// `MlsGroup::commit_to_pending_proposals` fails. 
- pub fn clear_pending_proposals( + pub async fn clear_pending_proposals( &mut self, storage: &Storage, ) -> Result<(), Storage::Error> { @@ -312,7 +318,9 @@ impl MlsGroup { self.proposal_store.empty(); // Clear proposals in storage - storage.clear_proposal_queue::(self.group_id())?; + storage + .clear_proposal_queue::(self.group_id()) + .await?; } Ok(()) @@ -334,16 +342,17 @@ impl MlsGroup { // === Storage Methods === /// Loads the state of the group with given id from persisted state. - pub fn load( + pub async fn load( storage: &Storage, group_id: &GroupId, ) -> Result, Storage::Error> { - let group_config = storage.mls_group_join_config(group_id)?; - let core_group = CoreGroup::load(storage, group_id)?; - let proposals: Vec<(ProposalRef, QueuedProposal)> = storage.queued_proposals(group_id)?; - let own_leaf_nodes = storage.own_leaf_nodes(group_id)?; - let aad = storage.aad(group_id)?; - let group_state = storage.group_state(group_id)?; + let group_config = storage.mls_group_join_config(group_id).await?; + let core_group = CoreGroup::load(storage, group_id).await?; + let proposals: Vec<(ProposalRef, QueuedProposal)> = + storage.queued_proposals(group_id).await?; + let own_leaf_nodes = storage.own_leaf_nodes(group_id).await?; + let aad = storage.aad(group_id).await?; + let group_state = storage.group_state(group_id).await?; let mut proposal_store = ProposalStore::new(); for (_ref, proposal) in proposals { @@ -365,16 +374,18 @@ impl MlsGroup { } /// Remove the persisted state from storage - pub fn delete( + pub async fn delete( &mut self, storage: &StorageProvider, ) -> Result<(), StorageProvider::Error> { - self.group.delete(storage)?; - storage.delete_group_config(self.group_id())?; - storage.clear_proposal_queue::(self.group_id())?; - storage.delete_own_leaf_nodes(self.group_id())?; - storage.delete_aad(self.group_id())?; - storage.delete_group_state(self.group_id())?; + self.group.delete(storage).await?; + storage.delete_group_config(self.group_id()).await?; + 
storage + .clear_proposal_queue::(self.group_id()) + .await?; + storage.delete_own_leaf_nodes(self.group_id()).await?; + storage.delete_aad(self.group_id()).await?; + storage.delete_group_state(self.group_id()).await?; Ok(()) } @@ -388,11 +399,13 @@ impl MlsGroup { } // Private methods of MlsGroup +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { /// Converts PublicMessage to MlsMessage. Depending on whether handshake /// message should be encrypted, PublicMessage messages are encrypted to /// PrivateMessage first. - fn content_to_mls_message( + async fn content_to_mls_message( &mut self, mls_auth_content: AuthenticatedContent, provider: &impl OpenMlsProvider, @@ -419,6 +432,7 @@ impl MlsGroup { self.configuration().padding_size(), provider, ) + .await // We can be sure the encryption will work because the plaintext was created by us .map_err(|_| LibraryError::custom("Malformed plaintext"))?; MlsMessageOut::from_private_message(ciphertext, self.group.version()) @@ -447,6 +461,8 @@ impl MlsGroup { } // Methods used in tests +#[cfg(not(feature = "async"))] +#[maybe_async::must_be_sync] impl MlsGroup { #[cfg(any(feature = "test-utils", test))] pub fn export_group_context(&self) -> &GroupContext { @@ -470,13 +486,14 @@ impl MlsGroup { } /// Removes a specific proposal from the store. 
- pub fn remove_pending_proposal( + pub async fn remove_pending_proposal( &mut self, storage: &Storage, proposal_ref: ProposalRef, ) -> Result<(), MlsGroupStateError> { storage .remove_proposal(self.group_id(), &proposal_ref) + .await .map_err(MlsGroupStateError::StorageError)?; self.proposal_store .remove(proposal_ref) diff --git a/openmls/src/group/mls_group/processing.rs b/openmls/src/group/mls_group/processing.rs index ba78d64bc..fa5669e9a 100644 --- a/openmls/src/group/mls_group/processing.rs +++ b/openmls/src/group/mls_group/processing.rs @@ -14,6 +14,8 @@ use crate::group::errors::MergeCommitError; use super::{errors::ProcessMessageError, *}; +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { /// Parses incoming messages from the DS. Checks for syntactic errors and /// makes some semantic checks as well. If the input is an encrypted @@ -24,7 +26,7 @@ impl MlsGroup { /// # Errors: /// Returns an [`ProcessMessageError`] when the validation checks fail /// with the exact reason of the failure. 
- pub fn process_message( + pub async fn process_message( &mut self, provider: &Provider, message: impl Into, @@ -52,22 +54,26 @@ impl MlsGroup { // Parse the message let sender_ratchet_configuration = self.configuration().sender_ratchet_configuration().clone(); - self.group.process_message( - provider, - message, - &sender_ratchet_configuration, - &self.proposal_store, - &self.own_leaf_nodes, - ) + self.group + .process_message( + provider, + message, + &sender_ratchet_configuration, + &self.proposal_store, + &self.own_leaf_nodes, + ) + .await } /// Stores a standalone proposal in the internal [ProposalStore] - pub fn store_pending_proposal( + pub async fn store_pending_proposal( &mut self, storage: &Storage, proposal: QueuedProposal, ) -> Result<(), Storage::Error> { - storage.queue_proposal(self.group_id(), &proposal.proposal_reference(), &proposal)?; + storage + .queue_proposal(self.group_id(), &proposal.proposal_reference(), &proposal) + .await?; // Store the proposal in in the internal ProposalStore self.proposal_store.add(proposal); @@ -85,7 +91,7 @@ impl MlsGroup { /// [`Welcome`]: crate::messages::Welcome // FIXME: #1217 #[allow(clippy::type_complexity)] - pub fn commit_to_pending_proposals( + pub async fn commit_to_pending_proposals( &mut self, provider: &Provider, signer: &impl Signer, @@ -101,11 +107,13 @@ impl MlsGroup { .framing_parameters(self.framing_parameters()) .proposal_store(&self.proposal_store) .build(); - let create_commit_result = self.group.create_commit(params, provider, signer)?; + let create_commit_result = self.group.create_commit(params, provider, signer).await?; // Convert PublicMessage messages to MLSMessage and encrypt them if required by // the configuration - let mls_message = self.content_to_mls_message(create_commit_result.commit, provider)?; + let mls_message = self + .content_to_mls_message(create_commit_result.commit, provider) + .await?; // Set the current group state to [`MlsGroupState::PendingCommit`], // storing the 
current [`StagedCommit`] from the commit results @@ -115,6 +123,7 @@ impl MlsGroup { provider .storage() .write_group_state(self.group_id(), &self.group_state) + .await .map_err(CommitToPendingProposalsError::StorageError)?; Ok(( @@ -128,7 +137,7 @@ impl MlsGroup { /// Merge a [StagedCommit] into the group after inspection. As this advances /// the epoch of the group, it also clears any pending commits. - pub fn merge_staged_commit( + pub async fn merge_staged_commit( &mut self, provider: &Provider, staged_commit: StagedCommit, @@ -140,11 +149,13 @@ impl MlsGroup { provider .storage() .write_group_state(self.group_id(), &self.group_state) + .await .map_err(MergeCommitError::StorageError)?; // Merge staged commit self.group - .merge_staged_commit(provider, staged_commit, &mut self.proposal_store)?; + .merge_staged_commit(provider, staged_commit, &mut self.proposal_store) + .await?; // Extract and store the resumption psk for the current epoch let resumption_psk = self.group.group_epoch_secrets().resumption_psk(); @@ -157,10 +168,12 @@ impl MlsGroup { provider .storage() .delete_own_leaf_nodes(self.group_id()) + .await .map_err(MergeCommitError::StorageError)?; // Delete a potential pending commit self.clear_pending_commit(provider.storage()) + .await .map_err(MergeCommitError::StorageError)?; Ok(()) @@ -168,7 +181,7 @@ impl MlsGroup { /// Merges the pending [`StagedCommit`] if there is one, and /// clears the field by setting it to `None`. 
- pub fn merge_pending_commit( + pub async fn merge_pending_commit( &mut self, provider: &Provider, ) -> Result<(), MergePendingCommitError> { @@ -176,7 +189,8 @@ impl MlsGroup { MlsGroupState::PendingCommit(_) => { let old_state = mem::replace(&mut self.group_state, MlsGroupState::Operational); if let MlsGroupState::PendingCommit(pending_commit_state) = old_state { - self.merge_staged_commit(provider, (*pending_commit_state).into())?; + self.merge_staged_commit(provider, (*pending_commit_state).into()) + .await?; } Ok(()) } diff --git a/openmls/src/group/mls_group/proposal.rs b/openmls/src/group/mls_group/proposal.rs index 46cd8ee64..0f60fa3f9 100644 --- a/openmls/src/group/mls_group/proposal.rs +++ b/openmls/src/group/mls_group/proposal.rs @@ -64,7 +64,9 @@ macro_rules! impl_propose_fun { /// Creates proposals to add an external PSK to the key schedule. /// /// Returns an error if there is a pending commit. - pub fn $name( + #[cfg_attr(feature = "async", maybe_async::must_be_async)] + #[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] + pub async fn $name( &mut self, provider: &Provider, signer: &impl Signer, @@ -88,16 +90,19 @@ macro_rules! 
impl_propose_fun { provider .storage() .queue_proposal(self.group.group_id(), &proposal_ref, &queued_proposal) + .await .map_err(ProposalError::StorageError)?; self.proposal_store.add(queued_proposal); - let mls_message = self.content_to_mls_message(proposal, provider)?; + let mls_message = self.content_to_mls_message(proposal, provider).await?; Ok((mls_message, proposal_ref)) } }; } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { impl_propose_fun!( propose_add_member_by_value, @@ -142,7 +147,7 @@ impl MlsGroup { ); /// Generate a proposal - pub fn propose( + pub async fn propose( &mut self, provider: &Provider, signer: &impl Signer, @@ -153,47 +158,58 @@ impl MlsGroup { Propose::Add(key_package) => match ref_or_value { ProposalOrRefType::Proposal => { self.propose_add_member_by_value(provider, signer, key_package) + .await } ProposalOrRefType::Reference => self .propose_add_member(provider, signer, &key_package) + .await .map_err(|e| e.into()), }, Propose::Update(leaf_node) => match ref_or_value { ProposalOrRefType::Proposal => self .propose_self_update_by_value(provider, signer, leaf_node) + .await .map_err(|e| e.into()), ProposalOrRefType::Reference => self .propose_self_update(provider, signer, leaf_node) + .await .map_err(|e| e.into()), }, Propose::Remove(leaf_index) => match ref_or_value { - ProposalOrRefType::Proposal => self.propose_remove_member_by_value( - provider, - signer, - LeafNodeIndex::new(leaf_index), - ), + ProposalOrRefType::Proposal => { + self.propose_remove_member_by_value( + provider, + signer, + LeafNodeIndex::new(leaf_index), + ) + .await + } ProposalOrRefType::Reference => self .propose_remove_member(provider, signer, LeafNodeIndex::new(leaf_index)) + .await .map_err(|e| e.into()), }, Propose::RemoveCredential(credential) => match ref_or_value { ProposalOrRefType::Proposal => { self.propose_remove_member_by_credential_by_value(provider, signer, 
&credential) + .await } ProposalOrRefType::Reference => self .propose_remove_member_by_credential(provider, signer, &credential) + .await .map_err(|e| e.into()), }, Propose::PreSharedKey(psk_id) => match psk_id.psk() { crate::schedule::Psk::External(_) => match ref_or_value { ProposalOrRefType::Proposal => { self.propose_external_psk_by_value(provider, signer, psk_id) + .await } ProposalOrRefType::Reference => { - self.propose_external_psk(provider, signer, psk_id) + self.propose_external_psk(provider, signer, psk_id).await } }, crate::schedule::Psk::Resumption(_) => Err(ProposalError::LibraryError( @@ -217,9 +233,11 @@ impl MlsGroup { Propose::Custom(custom_proposal) => match ref_or_value { ProposalOrRefType::Proposal => { self.propose_custom_proposal_by_value(provider, signer, custom_proposal) + .await } ProposalOrRefType::Reference => { self.propose_custom_proposal_by_reference(provider, signer, custom_proposal) + .await } }, } @@ -228,7 +246,7 @@ impl MlsGroup { /// Creates proposals to add members to the group. /// /// Returns an error if there is a pending commit. - pub fn propose_add_member( + pub async fn propose_add_member( &mut self, provider: &Provider, signer: &impl Signer, @@ -255,10 +273,11 @@ impl MlsGroup { provider .storage() .queue_proposal(self.group_id(), &proposal_ref, &proposal) + .await .map_err(ProposeAddMemberError::StorageError)?; self.proposal_store.add(proposal); - let mls_message = self.content_to_mls_message(add_proposal, provider)?; + let mls_message = self.content_to_mls_message(add_proposal, provider).await?; Ok((mls_message, proposal_ref)) } @@ -267,7 +286,7 @@ impl MlsGroup { /// The `member` has to be the member's leaf index. /// /// Returns an error if there is a pending commit. 
- pub fn propose_remove_member( + pub async fn propose_remove_member( &mut self, provider: &Provider, signer: &impl Signer, @@ -290,10 +309,13 @@ impl MlsGroup { provider .storage() .queue_proposal(self.group_id(), &proposal_ref, &proposal) + .await .map_err(ProposeRemoveMemberError::StorageError)?; self.proposal_store.add(proposal); - let mls_message = self.content_to_mls_message(remove_proposal, provider)?; + let mls_message = self + .content_to_mls_message(remove_proposal, provider) + .await?; Ok((mls_message, proposal_ref)) } @@ -302,7 +324,7 @@ impl MlsGroup { /// The `member` has to be the member's credential. /// /// Returns an error if there is a pending commit. - pub fn propose_remove_member_by_credential( + pub async fn propose_remove_member_by_credential( &mut self, provider: &Provider, signer: &impl Signer, @@ -319,6 +341,7 @@ impl MlsGroup { if let Some(member_index) = member_index { self.propose_remove_member(provider, signer, member_index) + .await } else { Err(ProposeRemoveMemberError::UnknownMember) } @@ -328,7 +351,7 @@ impl MlsGroup { /// The `member` has to be the member's credential. /// /// Returns an error if there is a pending commit. - pub fn propose_remove_member_by_credential_by_value( + pub async fn propose_remove_member_by_credential_by_value( &mut self, provider: &Provider, signer: &impl Signer, @@ -344,6 +367,7 @@ impl MlsGroup { if let Some(member_index) = member_index { self.propose_remove_member_by_value(provider, signer, member_index) + .await } else { Err(ProposalError::ProposeRemoveMemberError( ProposeRemoveMemberError::UnknownMember, @@ -355,7 +379,7 @@ impl MlsGroup { /// /// Returns an error when the group does not support all the required capabilities /// in the new `extensions`. 
- pub fn propose_group_context_extensions( + pub async fn propose_group_context_extensions( &mut self, provider: &Provider, extensions: Extensions, @@ -379,10 +403,11 @@ impl MlsGroup { provider .storage() .queue_proposal(self.group_id(), &proposal_ref, &queued_proposal) + .await .map_err(ProposalError::StorageError)?; self.proposal_store.add(queued_proposal); - let mls_message = self.content_to_mls_message(proposal, provider)?; + let mls_message = self.content_to_mls_message(proposal, provider).await?; Ok((mls_message, proposal_ref)) } @@ -395,7 +420,7 @@ impl MlsGroup { /// in the new `extensions` or if there is a pending commit. //// FIXME: #1217 #[allow(clippy::type_complexity)] - pub fn update_group_context_extensions( + pub async fn update_group_context_extensions( &mut self, provider: &Provider, extensions: Extensions, @@ -417,9 +442,11 @@ impl MlsGroup { .proposal_store(&self.proposal_store) .inline_proposals(inline_proposals) .build(); - let create_commit_result = self.group.create_commit(params, provider, signer)?; + let create_commit_result = self.group.create_commit(params, provider, signer).await?; - let mls_messages = self.content_to_mls_message(create_commit_result.commit, provider)?; + let mls_messages = self + .content_to_mls_message(create_commit_result.commit, provider) + .await?; // Set the current group state to [`MlsGroupState::PendingCommit`], // storing the current [`StagedCommit`] from the commit results @@ -430,6 +457,7 @@ impl MlsGroup { provider .storage() .write_group_state(self.group_id(), &self.group_state) + .await .map_err(CreateGroupContextExtProposalError::StorageError)?; Ok(( diff --git a/openmls/src/group/mls_group/updates.rs b/openmls/src/group/mls_group/updates.rs index 93508f984..b33301fd1 100644 --- a/openmls/src/group/mls_group/updates.rs +++ b/openmls/src/group/mls_group/updates.rs @@ -5,6 +5,8 @@ use crate::{messages::group_info::GroupInfo, storage::OpenMlsProvider, treesync: use super::*; +#[cfg_attr(feature = "async", 
maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl MlsGroup { /// Updates the own leaf node. /// @@ -23,7 +25,7 @@ impl MlsGroup { /// [`Welcome`]: crate::messages::Welcome // FIXME: #1217 #[allow(clippy::type_complexity)] - pub fn self_update( + pub async fn self_update( &mut self, provider: &Provider, signer: &impl Signer, @@ -39,11 +41,13 @@ impl MlsGroup { .build(); // Create Commit over all proposals. // TODO #751 - let create_commit_result = self.group.create_commit(params, provider, signer)?; + let create_commit_result = self.group.create_commit(params, provider, signer).await?; // Convert PublicMessage messages to MLSMessage and encrypt them if required by // the configuration - let mls_message = self.content_to_mls_message(create_commit_result.commit, provider)?; + let mls_message = self + .content_to_mls_message(create_commit_result.commit, provider) + .await?; // Set the current group state to [`MlsGroupState::PendingCommit`], // storing the current [`StagedCommit`] from the commit results @@ -54,9 +58,11 @@ impl MlsGroup { provider .storage() .write_group_state(self.group_id(), &self.group_state) + .await .map_err(SelfUpdateError::StorageError)?; self.group .store(provider.storage()) + .await .map_err(SelfUpdateError::StorageError)?; Ok(( @@ -71,7 +77,7 @@ impl MlsGroup { /// Creates a proposal to update the own leaf node. Optionally, a /// [`LeafNode`] can be provided to update the leaf node. Note that its /// private key must be manually added to the key store. - fn _propose_self_update( + async fn _propose_self_update( &mut self, provider: &Provider, signer: &impl Signer, @@ -108,6 +114,7 @@ impl MlsGroup { // TODO #1207: Move to the top of the function. 
keypair .write(provider.storage()) + .await .map_err(ProposeSelfUpdateError::StorageError)?; }; @@ -120,6 +127,7 @@ impl MlsGroup { provider .storage() .append_own_leaf_node(self.group_id(), &own_leaf) + .await .map_err(ProposeSelfUpdateError::StorageError)?; self.own_leaf_nodes.push(own_leaf); @@ -127,13 +135,15 @@ impl MlsGroup { } /// Creates a proposal to update the own leaf node. - pub fn propose_self_update( + pub async fn propose_self_update( &mut self, provider: &Provider, signer: &impl Signer, leaf_node: Option, ) -> Result<(MlsMessageOut, ProposalRef), ProposeSelfUpdateError> { - let update_proposal = self._propose_self_update(provider, signer, leaf_node)?; + let update_proposal = self + ._propose_self_update(provider, signer, leaf_node) + .await?; let proposal = QueuedProposal::from_authenticated_content_by_ref( self.ciphersuite(), provider.crypto(), @@ -143,22 +153,27 @@ impl MlsGroup { provider .storage() .queue_proposal(self.group_id(), &proposal_ref, &proposal) + .await .map_err(ProposeSelfUpdateError::StorageError)?; self.proposal_store.add(proposal); - let mls_message = self.content_to_mls_message(update_proposal, provider)?; + let mls_message = self + .content_to_mls_message(update_proposal, provider) + .await?; Ok((mls_message, proposal_ref)) } /// Creates a proposal to update the own leaf node. 
- pub fn propose_self_update_by_value( + pub async fn propose_self_update_by_value( &mut self, provider: &Provider, signer: &impl Signer, leaf_node: Option, ) -> Result<(MlsMessageOut, ProposalRef), ProposeSelfUpdateError> { - let update_proposal = self._propose_self_update(provider, signer, leaf_node)?; + let update_proposal = self + ._propose_self_update(provider, signer, leaf_node) + .await?; let proposal = QueuedProposal::from_authenticated_content_by_value( self.ciphersuite(), provider.crypto(), @@ -168,10 +183,13 @@ impl MlsGroup { provider .storage() .queue_proposal(self.group_id(), &proposal_ref, &proposal) + .await .map_err(ProposeSelfUpdateError::StorageError)?; self.proposal_store.add(proposal); - let mls_message = self.content_to_mls_message(update_proposal, provider)?; + let mls_message = self + .content_to_mls_message(update_proposal, provider) + .await?; Ok((mls_message, proposal_ref)) } diff --git a/openmls/src/group/public_group/mod.rs b/openmls/src/group/public_group/mod.rs index b4ffff081..df4df8f7b 100644 --- a/openmls/src/group/public_group/mod.rs +++ b/openmls/src/group/public_group/mod.rs @@ -75,6 +75,8 @@ pub struct PublicGroup { #[derive(Debug, Serialize, Deserialize)] pub struct InterimTranscriptHash(pub Vec); +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl PublicGroup { /// Create a new PublicGroup from a [`TreeSync`] instance and a /// [`GroupInfo`]. @@ -108,7 +110,7 @@ impl PublicGroup { /// This function performs basic validation checks and returns an error if /// one of the checks fails. See [`CreationFromExternalError`] for more /// details. 
- pub fn from_external( + pub async fn from_external( provider: &Provider, ratchet_tree: RatchetTreeIn, verifiable_group_info: VerifiableGroupInfo, @@ -174,6 +176,7 @@ impl PublicGroup { public_group .store(provider.storage()) + .await .map_err(CreationFromExternalError::WriteToStorageError)?; Ok((public_group, group_info)) @@ -294,6 +297,8 @@ impl PublicGroup { } // Getters +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl PublicGroup { /// Get the ciphersuite. pub fn ciphersuite(&self) -> Ciphersuite { @@ -355,30 +360,38 @@ impl PublicGroup { /// existing group, both inside [`PublicGroup`] and in [`CoreGroup`]. /// /// [`CoreGroup`]: crate::group::core_group::CoreGroup - pub(crate) fn store( + pub(crate) async fn store( &self, storage: &Storage, ) -> Result<(), Storage::Error> { let group_id = self.group_context.group_id(); - storage.write_tree(group_id, self.treesync())?; - storage.write_confirmation_tag(group_id, self.confirmation_tag())?; - storage.write_context(group_id, self.group_context())?; - storage.write_interim_transcript_hash( - group_id, - &InterimTranscriptHash(self.interim_transcript_hash.clone()), - )?; + storage.write_tree(group_id, self.treesync()).await?; + storage + .write_confirmation_tag(group_id, self.confirmation_tag()) + .await?; + storage + .write_context(group_id, self.group_context()) + .await?; + storage + .write_interim_transcript_hash( + group_id, + &InterimTranscriptHash(self.interim_transcript_hash.clone()), + ) + .await?; Ok(()) } /// Deletes the [`PublicGroup`] from storage. 
- pub(crate) fn delete( + pub(crate) async fn delete( &self, storage: &Storage, ) -> Result<(), Storage::Error> { - storage.delete_tree(self.group_id())?; - storage.delete_confirmation_tag(self.group_id())?; - storage.delete_context(self.group_id())?; - storage.delete_interim_transcript_hash(self.group_id())?; + storage.delete_tree(self.group_id()).await?; + storage.delete_confirmation_tag(self.group_id()).await?; + storage.delete_context(self.group_id()).await?; + storage + .delete_interim_transcript_hash(self.group_id()) + .await?; Ok(()) } @@ -386,15 +399,15 @@ impl PublicGroup { /// Loads the [`PublicGroup`] from storage. Called from [`CoreGroup::load`]. /// /// [`CoreGroup::load`]: crate::group::core_group::CoreGroup::load - pub(crate) fn load( + pub(crate) async fn load( storage: &Storage, group_id: &GroupId, ) -> Result, Storage::Error> { - let treesync = storage.treesync(group_id)?; - let group_context = storage.group_context(group_id)?; + let treesync = storage.treesync(group_id).await?; + let group_context = storage.group_context(group_id).await?; let interim_transcript_hash: Option = - storage.interim_transcript_hash(group_id)?; - let confirmation_tag = storage.confirmation_tag(group_id)?; + storage.interim_transcript_hash(group_id).await?; + let confirmation_tag = storage.confirmation_tag(group_id).await?; let build = || -> Option { Some(Self { diff --git a/openmls/src/group/public_group/staged_commit.rs b/openmls/src/group/public_group/staged_commit.rs index 342dbcf77..c6a920527 100644 --- a/openmls/src/group/public_group/staged_commit.rs +++ b/openmls/src/group/public_group/staged_commit.rs @@ -42,6 +42,8 @@ impl PublicStagedCommitState { } } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl PublicGroup { pub(crate) fn validate_commit<'a>( &self, @@ -278,7 +280,7 @@ impl PublicGroup { } /// Merges a [StagedCommit] into the public group state. 
- pub fn merge_commit( + pub async fn merge_commit( &mut self, storage: &Storage, staged_commit: StagedCommit, @@ -291,6 +293,8 @@ impl PublicGroup { } self.proposal_store.empty(); - self.store(storage).map_err(MergeCommitError::StorageError) + self.store(storage) + .await + .map_err(MergeCommitError::StorageError) } } diff --git a/openmls/src/key_packages/mod.rs b/openmls/src/key_packages/mod.rs index 17b8621de..71ac58107 100644 --- a/openmls/src/key_packages/mod.rs +++ b/openmls/src/key_packages/mod.rs @@ -421,6 +421,8 @@ pub struct KeyPackageBuilder { last_resort: bool, } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl KeyPackageBuilder { /// Create a key package builder. pub fn new() -> Self { @@ -497,7 +499,7 @@ impl KeyPackageBuilder { } /// Finalize and build the key package. - pub fn build( + pub async fn build( mut self, ciphersuite: Ciphersuite, provider: &impl OpenMlsProvider, @@ -530,6 +532,7 @@ impl KeyPackageBuilder { provider .storage() .write_key_package(&full_kp.key_package.hash_ref(provider.crypto())?, &full_kp) + .await .map_err(|_| KeyPackageNewError::StorageError)?; Ok(full_kp) diff --git a/openmls/src/messages/tests/test_welcome.rs b/openmls/src/messages/tests/test_welcome.rs index 888f94fd8..ad8ba6e51 100644 --- a/openmls/src/messages/tests/test_welcome.rs +++ b/openmls/src/messages/tests/test_welcome.rs @@ -300,6 +300,7 @@ fn test_welcome_message(ciphersuite: Ciphersuite, provider: &impl crate::storage /// the caller the GroupInfo. /// This allows transporting information in the Welcome for retrieving the ratchet /// tree. 
#[openmls_test::openmls_test] fn test_welcome_processing() { let group_id = GroupId::random(provider.rand()); diff --git a/openmls/src/schedule/psk.rs b/openmls/src/schedule/psk.rs index a725bb828..7c5ef62c8 100644 --- a/openmls/src/schedule/psk.rs +++ b/openmls/src/schedule/psk.rs @@ -222,6 +222,8 @@ pub struct PreSharedKeyId { pub(crate) psk_nonce: VLBytes, } +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl PreSharedKeyId { /// Construct a `PreSharedKeyID` with a random nonce. pub fn new( @@ -277,7 +279,7 @@ impl PreSharedKeyId { /// Save this `PreSharedKeyId` in the keystore. /// /// Note: The nonce is not saved as it must be unique for each time it's being applied. - pub fn store( + pub async fn store( &self, provider: &Provider, psk: &[u8], @@ -291,6 +293,7 @@ impl PreSharedKeyId { provider .storage() .write_psk(&self.psk, &psk_bundle) + .await .map_err(|_| PskError::Storage) } @@ -446,7 +449,9 @@ impl From for PskSecret { } } -pub(crate) fn load_psks<'p, Storage: StorageProvider>( +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] +pub(crate) async fn load_psks<'p, Storage: StorageProvider>( storage: &Storage, resumption_psk_store: &ResumptionPskStore, psk_ids: &'p [PreSharedKeyId], @@ -467,6 +472,7 @@ pub(crate) fn load_psks<'p, Storage: StorageProvider>( Psk::External(_) => { let psk_bundle: Option = storage .psk(psk_id.psk()) + .await .map_err(|_| PskError::KeyNotFound)?; if let Some(psk_bundle) = psk_bundle { psk_bundles.push((psk_id, psk_bundle.secret)); diff --git a/openmls/src/treesync/node/encryption_keys.rs b/openmls/src/treesync/node/encryption_keys.rs index 0aab483a4..0a053a49f 100644 --- a/openmls/src/treesync/node/encryption_keys.rs +++ b/openmls/src/treesync/node/encryption_keys.rs @@ -146,17 +146,23 @@ pub(crate) struct EncryptionKeyPair { private_key: EncryptionPrivateKey, }
+#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] impl EncryptionKeyPair { /// Write the [`EncryptionKeyPair`] to the store of the `provider`. /// /// This must only be used for encryption key pairs that are generated for /// update leaf nodes. All other encryption key pairs are stored as part /// of the key package or the epoch encryption key pairs. - pub(crate) fn write( + pub(crate) async fn write( &self, store: &Storage, ) -> Result<(), Storage::Error> { - store.write_encryption_key_pair(self.public_key(), self) + store + .write_encryption_key_pair(self.public_key(), self) + .await } /// Read the [`EncryptionKeyPair`] from the key store of the `provider`. This @@ -168,13 +174,14 @@ impl EncryptionKeyPair { /// of the key package or the epoch encryption key pairs. /// /// Returns `None` if the keypair cannot be read from the store. - pub(crate) fn read( + pub(crate) async fn read( provider: &impl OpenMlsProvider, encryption_key: &EncryptionKey, ) -> Option { provider .storage() .encryption_key_pair(encryption_key) + .await .ok() .flatten() } @@ -184,11 +191,11 @@ impl EncryptionKeyPair { /// This must only be used for encryption key pairs that are generated for /// update leaf nodes. All other encryption key pairs are stored as part /// of the key package or the epoch encryption key pairs.
- pub(crate) fn delete<Storage: StorageProvider<CURRENT_VERSION>>( + pub(crate) async fn delete<Storage: StorageProvider<CURRENT_VERSION>>( &self, store: &Storage, ) -> Result<(), Storage::Error> { - store.delete_encryption_key_pair(self.public_key()) + store.delete_encryption_key_pair(self.public_key()).await } pub(crate) fn public_key(&self) -> &EncryptionKey { diff --git a/traits/Cargo.toml b/traits/Cargo.toml index 440a3d64a..7fb36dbcb 100644 --- a/traits/Cargo.toml +++ b/traits/Cargo.toml @@ -15,7 +15,11 @@ path = "src/traits.rs" [features] default = [] test-utils = [] +async = ["dep:async-trait"] [dependencies] serde = { version = "1.0", features = ["derive"] } tls_codec = { workspace = true } +# async +maybe-async = { workspace = true } +async-trait = { workspace = true, optional = true } diff --git a/traits/src/storage.rs b/traits/src/storage.rs index 6c14781ad..e30c66cde 100644 --- a/traits/src/storage.rs +++ b/traits/src/storage.rs @@ -26,6 +26,8 @@ pub const V_TEST: u16 = u16::MAX; /// loading a group. /// /// More details can be taken from the comments on the respective method. +#[cfg_attr(feature = "async", maybe_async::must_be_async)] +#[cfg_attr(not(feature = "async"), maybe_async::must_be_sync)] pub trait StorageProvider { /// An opaque error returned by all methods on this trait.
type Error: core::fmt::Debug + std::error::Error; @@ -40,7 +42,7 @@ pub trait StorageProvider { // /// Writes the MlsGroupJoinConfig for the group with given id to storage - fn write_mls_join_config< + async fn write_mls_join_config< GroupId: traits::GroupId, MlsGroupJoinConfig: traits::MlsGroupJoinConfig, >( @@ -50,14 +52,14 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Writes the AAD for the group with given id to storage - fn write_aad>( + async fn write_aad>( &self, group_id: &GroupId, aad: &[u8], ) -> Result<(), Self::Error>; /// Adds an own leaf node for the group with given id to storage - fn append_own_leaf_node< + async fn append_own_leaf_node< GroupId: traits::GroupId, LeafNode: traits::LeafNode, >( @@ -70,7 +72,7 @@ pub trait StorageProvider { /// /// A good way to implement this could be to add a proposal to a proposal store, indexed by the /// proposal reference, and adding the reference to a per-group proposal queue list. - fn queue_proposal< + async fn queue_proposal< GroupId: traits::GroupId, ProposalRef: traits::ProposalRef, QueuedProposal: traits::QueuedProposal, @@ -82,14 +84,14 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Write the TreeSync tree. - fn write_tree, TreeSync: traits::TreeSync>( + async fn write_tree, TreeSync: traits::TreeSync>( &self, group_id: &GroupId, tree: &TreeSync, ) -> Result<(), Self::Error>; /// Write the interim transcript hash. - fn write_interim_transcript_hash< + async fn write_interim_transcript_hash< GroupId: traits::GroupId, InterimTranscriptHash: traits::InterimTranscriptHash, >( @@ -99,7 +101,7 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Write the group context. - fn write_context< + async fn write_context< GroupId: traits::GroupId, GroupContext: traits::GroupContext, >( @@ -109,7 +111,7 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Write the confirmation tag. 
- fn write_confirmation_tag< + async fn write_confirmation_tag< GroupId: traits::GroupId, ConfirmationTag: traits::ConfirmationTag, >( @@ -119,7 +121,7 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Writes the MlsGroupState for group with given id. - fn write_group_state< + async fn write_group_state< GroupState: traits::GroupState, GroupId: traits::GroupId, >( @@ -129,7 +131,7 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Writes the MessageSecretsStore for the group with the given id. - fn write_message_secrets< + async fn write_message_secrets< GroupId: traits::GroupId, MessageSecrets: traits::MessageSecrets, >( @@ -139,7 +141,7 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Writes the ResumptionPskStore for the group with the given id. - fn write_resumption_psk_store< + async fn write_resumption_psk_store< GroupId: traits::GroupId, ResumptionPskStore: traits::ResumptionPskStore, >( @@ -149,7 +151,7 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Writes the own leaf index inside the group for the group with the given id. - fn write_own_leaf_index< + async fn write_own_leaf_index< GroupId: traits::GroupId, LeafNodeIndex: traits::LeafNodeIndex, >( @@ -160,14 +162,14 @@ pub trait StorageProvider { /// Returns the MlsGroupState for group with given id. /// Sets whether to use the RatchetTreeExtension for the group with the given id. - fn set_use_ratchet_tree_extension>( + async fn set_use_ratchet_tree_extension>( &self, group_id: &GroupId, value: bool, ) -> Result<(), Self::Error>; /// Writes the GroupEpochSecrets for the group with the given id. - fn write_group_epoch_secrets< + async fn write_group_epoch_secrets< GroupId: traits::GroupId, GroupEpochSecrets: traits::GroupEpochSecrets, >( @@ -184,7 +186,7 @@ pub trait StorageProvider { /// /// The signature key pair is not known to OpenMLS. 
This may be used by the /// application - fn write_signature_key_pair< + async fn write_signature_key_pair< SignaturePublicKey: traits::SignaturePublicKey, SignatureKeyPair: traits::SignatureKeyPair, >( @@ -199,7 +201,7 @@ pub trait StorageProvider { /// This is only be used for encryption key pairs that are generated for /// update leaf nodes. All other encryption key pairs are stored as part /// of the key package or the epoch encryption key pairs. - fn write_encryption_key_pair< + async fn write_encryption_key_pair< EncryptionKey: traits::EncryptionKey, HpkeKeyPair: traits::HpkeKeyPair, >( @@ -210,7 +212,7 @@ pub trait StorageProvider { /// Store a list of HPKE encryption key pairs for a given epoch. /// This includes the private and public keys. - fn write_encryption_epoch_key_pairs< + async fn write_encryption_epoch_key_pairs< GroupId: traits::GroupId, EpochKey: traits::EpochKey, HpkeKeyPair: traits::HpkeKeyPair, @@ -231,7 +233,7 @@ pub trait StorageProvider { /// in order to iterate over key packages. OpenMLS does not have a reference /// for them. // ANCHOR: write_key_package - fn write_key_package< + async fn write_key_package< HashReference: traits::HashReference, KeyPackage: traits::KeyPackage, >( @@ -247,7 +249,7 @@ pub trait StorageProvider { /// /// PSKs are only read by OpenMLS. The application is responsible for managing /// and storing PSKs. 
- fn write_psk, PskBundle: traits::PskBundle>( + async fn write_psk, PskBundle: traits::PskBundle>( &self, psk_id: &PskId, psk: &PskBundle, @@ -258,7 +260,7 @@ pub trait StorageProvider { // /// Returns the MlsGroupJoinConfig for the group with given id - fn mls_group_join_config< + async fn mls_group_join_config< GroupId: traits::GroupId, MlsGroupJoinConfig: traits::MlsGroupJoinConfig, >( @@ -267,20 +269,23 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns the own leaf nodes for the group with given id - fn own_leaf_nodes, LeafNode: traits::LeafNode>( + async fn own_leaf_nodes< + GroupId: traits::GroupId, + LeafNode: traits::LeafNode, + >( &self, group_id: &GroupId, ) -> Result, Self::Error>; /// Returns the AAD for the group with given id /// If the value has not been set, returns an empty vector. - fn aad>( + async fn aad>( &self, group_id: &GroupId, ) -> Result, Self::Error>; /// Returns references of all queued proposals for the group with group id `group_id`, or an empty vector of none are stored. - fn queued_proposal_refs< + async fn queued_proposal_refs< GroupId: traits::GroupId, ProposalRef: traits::ProposalRef, >( @@ -289,7 +294,7 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns all queued proposals for the group with group id `group_id`, or an empty vector of none are stored. - fn queued_proposals< + async fn queued_proposals< GroupId: traits::GroupId, ProposalRef: traits::ProposalRef, QueuedProposal: traits::QueuedProposal, @@ -299,13 +304,13 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns the TreeSync tree for the group with group id `group_id`. - fn treesync, TreeSync: traits::TreeSync>( + async fn treesync, TreeSync: traits::TreeSync>( &self, group_id: &GroupId, ) -> Result, Self::Error>; /// Returns the group context for the group with group id `group_id`. 
- fn group_context< + async fn group_context< GroupId: traits::GroupId, GroupContext: traits::GroupContext, >( @@ -314,7 +319,7 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns the interim transcript hash for the group with group id `group_id`. - fn interim_transcript_hash< + async fn interim_transcript_hash< GroupId: traits::GroupId, InterimTranscriptHash: traits::InterimTranscriptHash, >( @@ -323,7 +328,7 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns the confirmation tag for the group with group id `group_id`. - fn confirmation_tag< + async fn confirmation_tag< GroupId: traits::GroupId, ConfirmationTag: traits::ConfirmationTag, >( @@ -332,13 +337,16 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns the group state for the group with group id `group_id`. - fn group_state, GroupId: traits::GroupId>( + async fn group_state< + GroupState: traits::GroupState, + GroupId: traits::GroupId, + >( &self, group_id: &GroupId, ) -> Result, Self::Error>; /// Returns the MessageSecretsStore for the group with the given id. - fn message_secrets< + async fn message_secrets< GroupId: traits::GroupId, MessageSecrets: traits::MessageSecrets, >( @@ -350,7 +358,7 @@ pub trait StorageProvider { /// /// Returning `None` here is considered an error because the store is needed /// by OpenMLS when loading a group. - fn resumption_psk_store< + async fn resumption_psk_store< GroupId: traits::GroupId, ResumptionPskStore: traits::ResumptionPskStore, >( @@ -359,7 +367,7 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns the own leaf index inside the group for the group with the given id. - fn own_leaf_index< + async fn own_leaf_index< GroupId: traits::GroupId, LeafNodeIndex: traits::LeafNodeIndex, >( @@ -368,13 +376,13 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Returns whether to use the RatchetTreeExtension for the group with the given id. 
- fn use_ratchet_tree_extension>( + async fn use_ratchet_tree_extension>( &self, group_id: &GroupId, ) -> Result, Self::Error>; /// Returns the GroupEpochSecrets for the group with the given id. - fn group_epoch_secrets< + async fn group_epoch_secrets< GroupId: traits::GroupId, GroupEpochSecrets: traits::GroupEpochSecrets, >( @@ -390,7 +398,7 @@ pub trait StorageProvider { /// /// The signature key pair is not known to OpenMLS. This may be used by the /// application - fn signature_key_pair< + async fn signature_key_pair< SignaturePublicKey: traits::SignaturePublicKey, SignatureKeyPair: traits::SignatureKeyPair, >( @@ -403,7 +411,7 @@ pub trait StorageProvider { /// This is only be used for encryption key pairs that are generated for /// update leaf nodes. All other encryption key pairs are stored as part /// of the key package or the epoch encryption key pairs. - fn encryption_key_pair< + async fn encryption_key_pair< HpkeKeyPair: traits::HpkeKeyPair, EncryptionKey: traits::EncryptionKey, >( @@ -413,7 +421,7 @@ pub trait StorageProvider { /// Get a list of HPKE encryption key pairs for a given epoch. /// This includes the private and public keys. - fn encryption_epoch_key_pairs< + async fn encryption_epoch_key_pairs< GroupId: traits::GroupId, EpochKey: traits::EpochKey, HpkeKeyPair: traits::HpkeKeyPair, @@ -425,7 +433,7 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Get a key package based on its hash reference. - fn key_package< + async fn key_package< KeyPackageRef: traits::HashReference, KeyPackage: traits::KeyPackage, >( @@ -434,7 +442,7 @@ pub trait StorageProvider { ) -> Result, Self::Error>; /// Get a PSK based on the PSK identifier. 
- fn psk, PskId: traits::PskId>( + async fn psk, PskId: traits::PskId>( &self, psk_id: &PskId, ) -> Result, Self::Error>; @@ -444,7 +452,7 @@ pub trait StorageProvider { // /// Removes an individual proposal from the proposal queue of the group with the provided id - fn remove_proposal< + async fn remove_proposal< GroupId: traits::GroupId, ProposalRef: traits::ProposalRef, >( @@ -454,85 +462,85 @@ pub trait StorageProvider { ) -> Result<(), Self::Error>; /// Deletes the AAD for the given id from storage - fn delete_aad>( + async fn delete_aad>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes own leaf nodes for the given id from storage - fn delete_own_leaf_nodes>( + async fn delete_own_leaf_nodes>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the MlsGroupJoinConfig for the given id from storage - fn delete_group_config>( + async fn delete_group_config>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the tree from storage - fn delete_tree>( + async fn delete_tree>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the confirmation tag from storage - fn delete_confirmation_tag>( + async fn delete_confirmation_tag>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the MlsGroupState for group with given id. - fn delete_group_state>( + async fn delete_group_state>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the group context for the group with given id - fn delete_context>( + async fn delete_context>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the interim transcript hash for the group with given id - fn delete_interim_transcript_hash>( + async fn delete_interim_transcript_hash>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the MessageSecretsStore for the group with the given id. 
- fn delete_message_secrets>( + async fn delete_message_secrets>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the ResumptionPskStore for the group with the given id. - fn delete_all_resumption_psk_secrets>( + async fn delete_all_resumption_psk_secrets>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the own leaf index inside the group for the group with the given id. - fn delete_own_leaf_index>( + async fn delete_own_leaf_index>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes any preference about whether to use the RatchetTreeExtension for the group with the given id. - fn delete_use_ratchet_tree_extension>( + async fn delete_use_ratchet_tree_extension>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Deletes the GroupEpochSecrets for the group with the given id. - fn delete_group_epoch_secrets>( + async fn delete_group_epoch_secrets>( &self, group_id: &GroupId, ) -> Result<(), Self::Error>; /// Clear the proposal queue for the grou pwith the given id. - fn clear_proposal_queue< + async fn clear_proposal_queue< GroupId: traits::GroupId, ProposalRef: traits::ProposalRef, >( @@ -548,7 +556,7 @@ pub trait StorageProvider { /// /// The signature key pair is not known to OpenMLS. This may be used by the /// application - fn delete_signature_key_pair>( + async fn delete_signature_key_pair>( &self, public_key: &SignaturePublicKey, ) -> Result<(), Self::Error>; @@ -558,14 +566,14 @@ pub trait StorageProvider { /// This is only be used for encryption key pairs that are generated for /// update leaf nodes. All other encryption key pairs are stored as part /// of the key package or the epoch encryption key pairs. - fn delete_encryption_key_pair>( + async fn delete_encryption_key_pair>( &self, public_key: &EncryptionKey, ) -> Result<(), Self::Error>; /// Delete a list of HPKE encryption key pairs for a given epoch. /// This includes the private and public keys. 
- fn delete_encryption_epoch_key_pairs< + async fn delete_encryption_epoch_key_pairs< GroupId: traits::GroupId, EpochKey: traits::EpochKey, >( @@ -579,13 +587,13 @@ pub trait StorageProvider { /// /// This function only deletes the key package. /// The corresponding encryption keys must be deleted separately. - fn delete_key_package>( + async fn delete_key_package>( &self, hash_ref: &KeyPackageRef, ) -> Result<(), Self::Error>; /// Delete a PSK based on an identifier. - fn delete_psk>( + async fn delete_psk>( &self, psk_id: &PskKey, ) -> Result<(), Self::Error>;