From 7bb93b71a84e727a00af6a1b4db854401c183656 Mon Sep 17 00:00:00 2001
From: Sean Lawlor
Date: Tue, 3 Jan 2023 23:01:25 -0500
Subject: [PATCH] Adding mock support for the Database trait in AKD (#333)

* Adding mock support for the Database trait in AKD

Related to: #332

* adding a mocked database test

* Move to Arc in StorageManager so we can clone it cleanly.

* Make storage manager create and manage the Arc

* Bump version for contract changes
---
 akd/Cargo.toml                           |   4 +-
 akd/src/storage/manager/mod.rs           |  24 ++-
 akd/src/storage/manager/tests.rs         |  26 ++-
 akd/src/storage/mod.rs                   |   6 +-
 akd/src/storage/tests.rs                 |  19 +-
 akd/src/tests.rs                         | 175 +++++++++++++++++-
 akd_client/Cargo.toml                    |   2 +-
 akd_core/Cargo.toml                      |   2 +-
 akd_local_auditor/Cargo.toml             |   2 +-
 akd_mysql/Cargo.toml                     |   6 +-
 akd_mysql/src/mysql_db_tests.rs          |   4 +-
 akd_test_tools/Cargo.toml                |   6 +-
 .../examples/example_tests.rs            |  10 +-
 .../src/fixture_generator/generator.rs   |  10 +-
 integration_tests/src/memory_tests.rs    |   1 -
 integration_tests/src/mysql_tests.rs     |  11 +-
 poc/src/main.rs                          |   8 +-
 17 files changed, 261 insertions(+), 55 deletions(-)

diff --git a/akd/Cargo.toml b/akd/Cargo.toml
index 8a0e5f6a..36fa3687 100644
--- a/akd/Cargo.toml
+++ b/akd/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "akd"
-version = "0.8.5"
+version = "0.8.6"
 authors = ["Harjasleen Malvai ", "Kevin Lewi ", "Sean Lawlor "]
 description = "An implementation of an auditable key directory"
 license = "MIT OR Apache-2.0"
@@ -62,6 +62,8 @@ once_cell = { version = "1" }
 ctor = "0.1"
 tokio-test = "0.4"
 tokio = { version = "1.21", features = ["rt", "sync", "time", "macros"] }
+mockall = "0.11"
+futures = "0.3"
 
 # To enable the public-test feature in tests
 akd = { path = ".", features = ["public-tests"], default-features = false }

diff --git a/akd/src/storage/manager/mod.rs b/akd/src/storage/manager/mod.rs
index b14c18d9..8a184d6a 100644
--- a/akd/src/storage/manager/mod.rs
+++ b/akd/src/storage/manager/mod.rs
@@ -51,16 +51,26 @@ mod tests;
 
 /// Represents the manager of the storage mediums, including caching
 /// and transactional operations (creating the transaction, committing it, etc)
-#[derive(Clone)]
 pub struct StorageManager<Db: Database> {
     cache: Option<TimedCache>,
     transaction: Transaction,
     /// The underlying database managed by this storage manager
-    pub db: Db,
+    db: Arc<Db>,
     metrics: [Arc<AtomicU64>; NUM_METRICS],
 }
 
+impl<Db: Database> Clone for StorageManager<Db> {
+    fn clone(&self) -> Self {
+        Self {
+            cache: self.cache.clone(),
+            transaction: self.transaction.clone(),
+            db: self.db.clone(),
+            metrics: self.metrics.clone(),
+        }
+    }
+}
+
 unsafe impl<Db: Database> Sync for StorageManager<Db> {}
 unsafe impl<Db: Database> Send for StorageManager<Db> {}
 
@@ -70,7 +80,7 @@ impl<Db: Database> StorageManager<Db> {
         Self {
             cache: None,
             transaction: Transaction::new(),
-            db,
+            db: Arc::new(db),
             metrics: [0; NUM_METRICS].map(|_| Arc::new(AtomicU64::new(0))),
         }
     }
@@ -89,11 +99,17 @@ impl<Db: Database> StorageManager<Db> {
                 cache_clean_frequency,
             )),
             transaction: Transaction::new(),
-            db,
+            db: Arc::new(db),
             metrics: [0; NUM_METRICS].map(|_| Arc::new(AtomicU64::new(0))),
         }
     }
 
+    /// Retrieve a reference to the database implementation
+    #[cfg(any(test, feature = "public-tests"))]
+    pub fn get_db(&self) -> Arc<Db> {
+        self.db.clone()
+    }
+
     /// Returns whether the storage manager has a cache
     pub fn has_cache(&self) -> bool {
         self.cache.is_some()
     }
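For consumers of the crate, the `StorageManager` change above is the contract break behind the version bump: the manager now owns its database behind an `Arc<Db>`, `Clone` is implemented manually so `Db` itself no longer needs to be `Clone`, and direct database access moves from the old public `db` field to `get_db()`. A minimal sketch of the new usage (illustrative only, not part of the patch; it assumes a test build or the `public-tests` feature so that `get_db()` is compiled in):

    use std::sync::Arc;
    use akd::storage::memory::AsyncInMemoryDatabase;
    use akd::storage::StorageManager;

    fn sketch_shared_database_handle() {
        // The manager takes ownership of the database; no Clone bound on Db required.
        let manager = StorageManager::new_no_cache(AsyncInMemoryDatabase::new());

        // Cloning the manager is now cheap: both clones share the same Arc<Db>.
        let manager_clone = manager.clone();

        // Code that previously read `manager.db` goes through get_db() instead,
        // receiving a shared handle to the same underlying database.
        let shared_db: Arc<AsyncInMemoryDatabase> = manager_clone.get_db();
        assert!(Arc::strong_count(&shared_db) >= 2);
    }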
diff --git a/akd/src/storage/manager/tests.rs b/akd/src/storage/manager/tests.rs
index 72c6163c..60872d8e 100644
--- a/akd/src/storage/manager/tests.rs
+++ b/akd/src/storage/manager/tests.rs
@@ -18,7 +18,7 @@ use crate::*;
 #[tokio::test]
 async fn test_storage_manager_transaction() {
     let db = AsyncInMemoryDatabase::new();
-    let storage_manager = StorageManager::new_no_cache(db.clone());
+    let storage_manager = StorageManager::new_no_cache(db);
 
     assert!(
         storage_manager.begin_transaction(),
@@ -67,7 +67,11 @@ async fn test_storage_manager_transaction() {
     // there should be no items in the db, as they should all be in the transaction log
     assert_eq!(
         Ok(0),
-        db.batch_get_all_direct().await.map(|items| items.len())
+        storage_manager
+            .db
+            .batch_get_all_direct()
+            .await
+            .map(|items| items.len())
     );
     assert_eq!(11, storage_manager.transaction.count());
 
@@ -101,7 +105,11 @@ async fn test_storage_manager_transaction() {
     // now the records should be in the database and the transaction log empty
     assert_eq!(
         Ok(11),
-        db.batch_get_all_direct().await.map(|items| items.len())
+        storage_manager
+            .db
+            .batch_get_all_direct()
+            .await
+            .map(|items| items.len())
     );
     assert_eq!(0, storage_manager.transaction.count());
 }
@@ -109,7 +117,8 @@ async fn test_storage_manager_transaction() {
 #[tokio::test]
 async fn test_storage_manager_cache_populated_by_batch_set() {
     let db = AsyncInMemoryDatabase::new();
-    let storage_manager = StorageManager::new(db.clone(), None, None, None);
+
+    let storage_manager = StorageManager::new(db, None, None, None);
 
     let mut records = (0..10)
         .into_iter()
@@ -153,7 +162,7 @@ async fn test_storage_manager_cache_populated_by_batch_set() {
         .expect("Failed to set batch of records");
 
     // flush the database
-    db.clear().await;
+    storage_manager.db.clear().await;
 
     // test a retrieval still gets data (from the cache)
     let key = NodeKey(NodeLabel {
@@ -190,7 +199,7 @@ async fn test_storage_manager_cache_populated_by_batch_set() {
 #[tokio::test]
 async fn test_storage_manager_cache_populated_by_batch_get() {
     let db = AsyncInMemoryDatabase::new();
-    let storage_manager = StorageManager::new(db.clone(), None, None, None);
+    let storage_manager = StorageManager::new(db, None, None, None);
 
     let mut keys = vec![];
     let mut records = (0..10)
@@ -235,12 +244,13 @@ async fn test_storage_manager_cache_populated_by_batch_get() {
         .await
         .expect("Failed to set batch of records");
 
+    let db_arc = storage_manager.get_db();
     // flush the cache by destroying the storage manager
    drop(storage_manager);
 
     // re-create the storage manager, and run a batch_get of the same data keys to populate the cache
     let storage_manager = StorageManager::new(
-        db.clone(),
+        Arc::try_unwrap(db_arc).expect("Failed to grab arc"),
         Some(std::time::Duration::from_secs(1000)),
         None,
         None,
@@ -252,7 +262,7 @@ async fn test_storage_manager_cache_populated_by_batch_get() {
         .expect("Failed to get a batch of records");
 
     // flush the database
-    db.clear().await;
+    storage_manager.db.clear().await;
 
     // test a retrieval still gets data (from the cache)
     let key = NodeKey(NodeLabel {

diff --git a/akd/src/storage/mod.rs b/akd/src/storage/mod.rs
index 3e5b7f4f..d0690580 100644
--- a/akd/src/storage/mod.rs
+++ b/akd/src/storage/mod.rs
@@ -43,7 +43,7 @@ pub enum DbSetState {
 
 /// Storable represents an _item_ which can be stored in the storage layer
 #[cfg(feature = "serde_serialization")]
-pub trait Storable: Clone + Serialize + DeserializeOwned + Sync {
+pub trait Storable: Clone + Serialize + DeserializeOwned + Sync + 'static {
     /// This particular storage will have a key type
     type StorageKey: Clone + Serialize + Eq + Hash + Send + Sync + std::fmt::Debug;
 
@@ -68,7 +68,7 @@ pub trait Storable: Clone + Serialize + DeserializeOwned + Sync {
 
 /// Storable represents an _item_ which can be stored in the storage layer
 #[cfg(not(feature = "serde_serialization"))]
-pub trait Storable: Clone + Sync {
+pub trait Storable: Clone + Sync + 'static {
     /// This particular storage will have a key type
     type StorageKey: Clone + Eq + Hash + Send + Sync + std::fmt::Debug;
 
@@ -93,7 +93,7 @@ pub trait Storable: Clone + Sync {
 
 /// A database implementation backing storage for the AKD
 #[async_trait]
-pub trait Database: Clone + Send + Sync {
+pub trait Database: Send + Sync {
     /// Set a record in the database
     async fn set(&self, record: DbRecord) -> Result<(), StorageError>;
 
diff --git a/akd/src/storage/tests.rs b/akd/src/storage/tests.rs
index a28be4f3..789b1729 100644
--- a/akd/src/storage/tests.rs
+++ b/akd/src/storage/tests.rs
@@ -35,7 +35,7 @@ mod memory_storage_tests {
     #[serial]
     async fn test_in_memory_db() {
         let db = AsyncInMemoryDatabase::new();
-        crate::storage::tests::run_test_cases_for_storage_impl(&db).await;
+        crate::storage::tests::run_test_cases_for_storage_impl(db).await;
     }
 }
 
@@ -43,14 +43,15 @@ mod memory_storage_tests {
 /// Run the storage-layer test suite for a given storage implementation.
 /// This is public because it can be used by other implemented storage layers
 /// for consistency checks (e.g. mysql, memcached, etc)
-pub async fn run_test_cases_for_storage_impl<S: Database>(db: &S) {
-    test_get_and_set_item(db).await;
-    test_user_data(db).await;
-    test_transactions(db).await;
-    test_batch_get_items(db).await;
+pub async fn run_test_cases_for_storage_impl<S: Database>(db: S) -> StorageManager<S> {
+    test_get_and_set_item(&db).await;
+    test_user_data(&db).await;
+    test_batch_get_items(&db).await;
 
-    let manager = StorageManager::new_no_cache(db.clone());
+    let manager = StorageManager::new_no_cache(db);
+    test_transactions(&manager).await;
     test_tombstoning_data(&manager).await.unwrap();
+    manager
 }
 
 // *** New Test Helper Functions *** //
@@ -312,9 +313,7 @@ async fn test_batch_get_items<Ns: Database>(storage: &Ns) {
     }
 }
 
-async fn test_transactions<S: Database>(db: &S) {
-    let storage = crate::storage::manager::StorageManager::new_no_cache(db.clone());
-
+async fn test_transactions<S: Database>(storage: &StorageManager<S>) {
     let mut rand_users: Vec<Vec<u8>> = vec![];
     for _ in 0..20 {
         let str: String = thread_rng()

diff --git a/akd/src/tests.rs b/akd/src/tests.rs
index d70c53b3..8f08e444 100644
--- a/akd/src/tests.rs
+++ b/akd/src/tests.rs
@@ -8,16 +8,124 @@
 
 //! Contains the tests for the high-level API (directory, auditor, client)
 
+use std::collections::HashMap;
+
 use crate::{
     auditor::audit_verify,
     client::{key_history_verify, lookup_verify},
     directory::{Directory, PublishCorruption},
     ecvrf::{HardCodedAkdVRF, VRFKeyStorage},
-    errors::AkdError,
-    storage::{manager::StorageManager, memory::AsyncInMemoryDatabase, types::DbRecord, Database},
-    AkdLabel, AkdValue, HistoryParams, HistoryVerificationParams, VerifyResult,
+    errors::{AkdError, StorageError},
+    storage::{
+        manager::StorageManager,
+        memory::AsyncInMemoryDatabase,
+        types::{DbRecord, KeyData, ValueState, ValueStateRetrievalFlag},
+        Database, DbSetState, Storable,
+    },
+    tree_node::TreeNodeWithPreviousValue,
+    AkdLabel, AkdValue, Azks, HistoryParams, HistoryVerificationParams, VerifyResult,
 };
 
+#[derive(Clone)]
+pub struct LocalDatabase;
+
+unsafe impl Send for LocalDatabase {}
+unsafe impl Sync for LocalDatabase {}
+
+mockall::mock! {
+    pub LocalDatabase {
+
+    }
+
+    impl Clone for LocalDatabase {
+        fn clone(&self) -> Self;
+    }
+
+    #[async_trait::async_trait]
+    impl Database for LocalDatabase {
+        async fn set(&self, record: DbRecord) -> Result<(), StorageError>;
+        async fn batch_set(
+            &self,
+            records: Vec<DbRecord>,
+            state: DbSetState,
+        ) -> Result<(), StorageError>;
+        async fn get<St: Storable>(&self, id: &St::StorageKey) -> Result<DbRecord, StorageError>;
+        async fn batch_get<St: Storable>(
+            &self,
+            ids: &[St::StorageKey],
+        ) -> Result<Vec<DbRecord>, StorageError>;
+        async fn get_user_data(&self, username: &AkdLabel) -> Result<KeyData, StorageError>;
+        async fn get_user_state(
+            &self,
+            username: &AkdLabel,
+            flag: ValueStateRetrievalFlag,
+        ) -> Result<ValueState, StorageError>;
+        async fn get_user_state_versions(
+            &self,
+            usernames: &[AkdLabel],
+            flag: ValueStateRetrievalFlag,
+        ) -> Result<HashMap<AkdLabel, (u64, AkdValue)>, StorageError>;
+    }
+}
+
+fn setup_mocked_db(db: &mut MockLocalDatabase, test_db: &AsyncInMemoryDatabase) {
+    // ===== Set ===== //
+    let tmp_db = test_db.clone();
+    db.expect_set()
+        .returning(move |record| futures::executor::block_on(tmp_db.set(record)));
+
+    // ===== Batch Set ===== //
+    let tmp_db = test_db.clone();
+    db.expect_batch_set().returning(move |record, other| {
+        futures::executor::block_on(tmp_db.batch_set(record, other))
+    });
+
+    // ===== Get ===== //
+    let tmp_db = test_db.clone();
+    db.expect_get::<Azks>()
+        .returning(move |key| futures::executor::block_on(tmp_db.get::<Azks>(key)));
+
+    let tmp_db = test_db.clone();
+    db.expect_get::<TreeNodeWithPreviousValue>()
+        .returning(move |key| {
+            futures::executor::block_on(tmp_db.get::<TreeNodeWithPreviousValue>(key))
+        });
+
+    let tmp_db = test_db.clone();
+    db.expect_get::<ValueState>()
+        .returning(move |key| futures::executor::block_on(tmp_db.get::<ValueState>(key)));
+
+    // ===== Batch Get ===== //
+    let tmp_db = test_db.clone();
+    db.expect_batch_get::<Azks>()
+        .returning(move |key| futures::executor::block_on(tmp_db.batch_get::<Azks>(key)));
+
+    let tmp_db = test_db.clone();
+    db.expect_batch_get::<TreeNodeWithPreviousValue>()
+        .returning(move |key| {
+            futures::executor::block_on(tmp_db.batch_get::<TreeNodeWithPreviousValue>(key))
+        });
+
+    let tmp_db = test_db.clone();
+    db.expect_batch_get::<ValueState>()
+        .returning(move |key| futures::executor::block_on(tmp_db.batch_get::<ValueState>(key)));
+
+    // ===== Get User Data ===== //
+    let tmp_db = test_db.clone();
+    db.expect_get_user_data()
+        .returning(move |arg| futures::executor::block_on(tmp_db.get_user_data(arg)));
+
+    // ===== Get User State ===== //
+    let tmp_db = test_db.clone();
+    db.expect_get_user_state()
+        .returning(move |arg, flag| futures::executor::block_on(tmp_db.get_user_state(arg, flag)));
+
+    // ===== Get User State Versions ===== //
+    let tmp_db = test_db.clone();
+    db.expect_get_user_state_versions()
+        .returning(move |arg, flag| {
+            futures::executor::block_on(tmp_db.get_user_state_versions(arg, flag))
+        });
+}
+
 // A simple test to ensure that the empty tree hashes to the correct value
 #[tokio::test]
 async fn test_empty_tree_root_hash() -> Result<(), AkdError> {
@@ -1057,6 +1165,67 @@ async fn test_tombstoned_key_history() -> Result<(), AkdError> {
     Ok(())
 }
 
+#[tokio::test]
+async fn test_publish_op_makes_no_get_requests() {
+    let test_db = AsyncInMemoryDatabase::new();
+
+    let mut db = MockLocalDatabase {
+        ..Default::default()
+    };
+    setup_mocked_db(&mut db, &test_db);
+
+    let storage = StorageManager::new_no_cache(db);
+    let vrf = HardCodedAkdVRF {};
+    let akd = Directory::<_, _>::new(storage, vrf, false)
+        .await
+        .expect("Failed to create directory");
+
+    // Create a single update (label, value) pair: ("hello10", "hello10")
+    let mut updates = vec![];
+    for i in 0..1 {
+        updates.push((
+            AkdLabel(format!("hello1{}", i).as_bytes().to_vec()),
+            AkdValue(format!("hello1{}", i).as_bytes().to_vec()),
+        ));
+    }
+    // Publish the updates. Now the akd's epoch will be 1.
+    akd.publish(updates)
+        .await
+        .expect("Failed to do initial publish");
+
+    // create a new mock, this time which explodes on any "get" of tree-nodes (shouldn't happen). It is still backed by the same
+    // async in-mem db so all previous data should be there
+    let mut db2 = MockLocalDatabase {
+        ..Default::default()
+    };
+    setup_mocked_db(&mut db2, &test_db);
+    db2.expect_get::<TreeNodeWithPreviousValue>()
+        .returning(|_| Err(StorageError::Other("Boom!".to_string())));
+
+    let storage = StorageManager::new_no_cache(db2);
+    let vrf = HardCodedAkdVRF {};
+    let akd = Directory::<_, _>::new(storage, vrf, false)
+        .await
+        .expect("Failed to create directory");
+
+    // create more updates
+    let mut updates = vec![];
+    for i in 0..1 {
+        updates.push((
+            AkdLabel(format!("hello1{}", i).as_bytes().to_vec()),
+            AkdValue(format!("hello1{}", i + 1).as_bytes().to_vec()),
+        ));
+    }
+
+    // try to publish again, this time with the "boom" returning from any mocked get-calls
+    // on tree nodes
+    akd.publish(updates)
+        .await
+        .expect("Failed to do subsequent publish");
+}
+
 // Test coverage on issue #144, verification failures with
 // small trees (<4 nodes) in both the tests below
 // Note that the use of a VRF means that that the label

diff --git a/akd_client/Cargo.toml b/akd_client/Cargo.toml
index b24738f7..119d013b 100644
--- a/akd_client/Cargo.toml
+++ b/akd_client/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "akd_client"
-version = "0.8.5"
+version = "0.8.6"
 authors = ["Harjasleen Malvai ", "Kevin Lewi ", "Sean Lawlor "]
 description = "Client verification companion for the auditable key directory with limited dependencies."
 license = "MIT OR Apache-2.0"

diff --git a/akd_core/Cargo.toml b/akd_core/Cargo.toml
index a3d879bb..149e704b 100644
--- a/akd_core/Cargo.toml
+++ b/akd_core/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "akd_core"
-version = "0.8.5"
+version = "0.8.6"
 authors = ["Sean Lawlor "]
 description = "Core utilities for the auditable-key-directory suite of crates (akd and akd_client)"
 license = "MIT OR Apache-2.0"

diff --git a/akd_local_auditor/Cargo.toml b/akd_local_auditor/Cargo.toml
index c0960bc8..8f173876 100644
--- a/akd_local_auditor/Cargo.toml
+++ b/akd_local_auditor/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "akd_local_auditor"
 default-run = "akd_local_auditor"
-version = "0.8.5"
+version = "0.0.0"
 authors = ["Sean Lawlor "]
 edition = "2018"
 publish = false

diff --git a/akd_mysql/Cargo.toml b/akd_mysql/Cargo.toml
index f3427eb1..25015964 100644
--- a/akd_mysql/Cargo.toml
+++ b/akd_mysql/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "akd_mysql"
-version = "0.8.5"
+version = "0.8.6"
 authors = ["Harjasleen Malvai ", "Kevin Lewi ", "Sean Lawlor "]
 description = "A MySQL storage layer implementation for an auditable key directory (AKD)"
 license = "MIT OR Apache-2.0"
@@ -25,9 +25,9 @@ async-recursion = "0.3"
 mysql_async = "0.31"
 mysql_common = "0.29.1"
 log = { version = "0.4.8", features = ["kv_unstable"] }
-akd = { path = "../akd", version = "0.8.5", features = ["serde_serialization"], default-features = false }
+akd = { path = "../akd", version = "0.8.6", features = ["serde_serialization"], default-features = false }
 
 [dev-dependencies]
 criterion = "0.3"
 serial_test = "0.5"
-akd = { path = "../akd", version = "0.8.5", features = ["blake3", "public-tests"], default-features = false }
+akd = { path = "../akd", version = "0.8.6", features = ["blake3", "public-tests"], default-features = false }
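The `run_test_cases_for_storage_impl` change earlier in this patch (consume the database, hand back the `StorageManager`) is what the MySQL test below adapts to, and roughly the same pattern applies to any external `Database` implementation. Sketch only: `MyDatabase` and `teardown()` are placeholder names, not APIs from this repository:

    // Hypothetical conformance test in an external storage crate.
    #[tokio::test]
    async fn my_database_conformance() {
        let db = MyDatabase::connect("...").await;

        // The shared suite now takes ownership of the database...
        let manager = akd::storage::tests::run_test_cases_for_storage_impl(db).await;

        // ...so any cleanup afterwards goes through the manager's shared handle.
        manager.get_db().teardown().await;
    }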
diff --git a/akd_mysql/src/mysql_db_tests.rs b/akd_mysql/src/mysql_db_tests.rs
index 1876914a..b34131ab 100644
--- a/akd_mysql/src/mysql_db_tests.rs
+++ b/akd_mysql/src/mysql_db_tests.rs
@@ -45,10 +45,10 @@ async fn test_mysql_db() {
     }
 
     // The test cases
-    akd::storage::tests::run_test_cases_for_storage_impl(&mysql_db).await;
+    let manager = akd::storage::tests::run_test_cases_for_storage_impl(mysql_db.clone()).await;
 
     // clean the test infra
-    if let Err(mysql_async::Error::Server(error)) = mysql_db.drop_tables().await {
+    if let Err(mysql_async::Error::Server(error)) = manager.get_db().drop_tables().await {
         println!(
             "ERROR: Failed to clean MySQL test database with error {}",
             error

diff --git a/akd_test_tools/Cargo.toml b/akd_test_tools/Cargo.toml
index 9e2f8bc5..2a7c57ff 100644
--- a/akd_test_tools/Cargo.toml
+++ b/akd_test_tools/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "akd_test_tools"
-version = "0.8.5"
+version = "0.8.6"
 authors = ["Evan Au ", "Sean Lawlor "]
 description = "Test utilities and tooling"
 license = "MIT OR Apache-2.0"
@@ -22,9 +22,9 @@ serde = "1.0"
 async-trait = "0.1"
 thread-id = "3"
 
-akd = { path = "../akd", features = ["serde_serialization"], version = "0.8.5" }
+akd = { path = "../akd", features = ["serde_serialization"], version = "0.8.6" }
 
 [dev-dependencies]
 assert_fs="1"
-akd = { path = "../akd", features = ["public-tests", "rand", "serde_serialization"], version = "0.8.5" }
+akd = { path = "../akd", features = ["public-tests", "rand", "serde_serialization"], version = "0.8.6" }

diff --git a/akd_test_tools/src/fixture_generator/examples/example_tests.rs b/akd_test_tools/src/fixture_generator/examples/example_tests.rs
index aa8cc78e..f47f1837 100644
--- a/akd_test_tools/src/fixture_generator/examples/example_tests.rs
+++ b/akd_test_tools/src/fixture_generator/examples/example_tests.rs
@@ -35,8 +35,8 @@ async fn test_use_fixture() {
         .await
         .unwrap();
     let vrf = HardCodedAkdVRF {};
-    let storage_manager = StorageManager::new_no_cache(db.clone());
-    let akd = Directory::<_, _>::new(storage_manager, vrf, false)
+    let storage_manager = StorageManager::new_no_cache(db);
+    let akd = Directory::<_, _>::new(storage_manager.clone(), vrf, false)
         .await
         .unwrap();
 
@@ -46,7 +46,11 @@ async fn test_use_fixture() {
 
     // assert final directory state
     let final_state = reader.read_state(epochs[1]).unwrap();
-    let records = db.batch_get_all_direct().await.unwrap();
+    let records = storage_manager
+        .get_db()
+        .batch_get_all_direct()
+        .await
+        .unwrap();
     assert_eq!(final_state.records.len(), records.len());
     assert!(records.iter().all(|r| final_state.records.contains(r)));
 }

diff --git a/akd_test_tools/src/fixture_generator/generator.rs b/akd_test_tools/src/fixture_generator/generator.rs
index 813f05af..17ad27c4 100644
--- a/akd_test_tools/src/fixture_generator/generator.rs
+++ b/akd_test_tools/src/fixture_generator/generator.rs
@@ -121,8 +121,8 @@ pub(crate) async fn generate(args: Args) {
     // initialize directory
     let db = akd::storage::memory::AsyncInMemoryDatabase::new();
     let vrf = akd::ecvrf::HardCodedAkdVRF {};
-    let storage_manager = StorageManager::new_no_cache(db.clone());
-    let akd = Directory::<_, _>::new(storage_manager, vrf, false)
+    let storage_manager = StorageManager::new_no_cache(db);
+    let akd = Directory::<_, _>::new(storage_manager.clone(), vrf, false)
         .await
         .unwrap();
 
@@ -169,7 +169,11 @@ pub(crate) async fn generate(args: Args) {
         let comment = format!("{} {}", STATE_COMMENT, epoch);
         let state = State {
             epoch,
-            records: db.batch_get_all_direct().await.unwrap(),
+            records: storage_manager
+                .get_db()
+                .batch_get_all_direct()
+                .await
+                .unwrap(),
         };
         writer.write_line();
         writer.write_comment(&comment);

diff --git a/integration_tests/src/memory_tests.rs b/integration_tests/src/memory_tests.rs
index 4cc9a16c..7475361a 100644
--- a/integration_tests/src/memory_tests.rs
+++ b/integration_tests/src/memory_tests.rs
@@ -7,7 +7,6 @@
 use akd::{ecvrf::HardCodedAkdVRF, storage::StorageManager};
 use log::info;
 
-
 type InMemoryDb = akd::storage::memory::AsyncInMemoryDatabase;
 
 #[tokio::test]

diff --git a/integration_tests/src/mysql_tests.rs b/integration_tests/src/mysql_tests.rs
index 3a87c1c0..a5712dd4 100644
--- a/integration_tests/src/mysql_tests.rs
+++ b/integration_tests/src/mysql_tests.rs
@@ -58,7 +58,8 @@ async fn test_directory_operations() {
     storage_manager.log_metrics(log::Level::Trace).await;
 
     // clean the test infra
-    if let Err(mysql_async::Error::Server(error)) = mysql_db.drop_tables().await {
+    if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await
+    {
         error!(
             "ERROR: Failed to clean MySQL test database with error {}",
             error
@@ -119,7 +120,8 @@ async fn test_directory_operations_with_caching() {
     storage_manager.log_metrics(log::Level::Trace).await;
 
     // clean the test infra
-    if let Err(mysql_async::Error::Server(error)) = mysql_db.drop_tables().await {
+    if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await
+    {
         error!(
             "ERROR: Failed to clean MySQL test database with error {}",
             error
@@ -169,12 +171,13 @@ async fn test_lookups() {
     }
 
     let vrf = HardCodedAkdVRF {};
-    let storage_manager = StorageManager::new(mysql_db.clone(), None, None, None);
+    let storage_manager = StorageManager::new(mysql_db, None, None, None);
 
     crate::test_util::test_lookups::<_, HardCodedAkdVRF>(&storage_manager, &vrf, 50, 5, 100)
         .await;
 
     // clean the test infra
-    if let Err(mysql_async::Error::Server(error)) = mysql_db.drop_tables().await {
+    if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await
+    {
         error!(
             "ERROR: Failed to clean MySQL test database with error {}",
             error

diff --git a/poc/src/main.rs b/poc/src/main.rs
index 60d55519..084f1927 100644
--- a/poc/src/main.rs
+++ b/poc/src/main.rs
@@ -404,7 +404,7 @@ async fn process_input(
         OtherMode::Flush => {
             println!("======= One-off flushing of the database ======= ");
             if let Some(mysql_db) = db {
-                if let Err(error) = mysql_db.db.delete_data().await {
+                if let Err(error) = mysql_db.get_db().delete_data().await {
                     error!("Error flushing database: {}", error);
                 } else {
                     info!("Database flushed.");
@@ -414,7 +414,7 @@ async fn process_input(
         OtherMode::Drop => {
            println!("======= Dropping database ======= ");
             if let Some(mysql_db) = db {
-                if let Err(error) = mysql_db.db.drop_tables().await {
+                if let Err(error) = mysql_db.get_db().drop_tables().await {
                     error!("Error dropping database: {}", error);
                 } else {
                     info!("Database dropped.");
@@ -448,7 +448,7 @@ async fn process_input(
         Command::Flush => {
             println!("Flushing the database...");
             if let Some(mysql_db) = &db {
-                if let Err(error) = mysql_db.db.delete_data().await {
+                if let Err(error) = mysql_db.get_db().delete_data().await {
                     println!("Error flushing database: {}", error);
                 } else {
                     println!(
@@ -464,7 +464,7 @@ async fn process_input(
             }
             println!("===== Auditable Key Directory Information =====");
             if let Some(mysql) = &db {
-                println!("  Database properties ({})", mysql.db);
+                println!("  Database properties ({})", mysql.get_db());
             } else {
                 println!("  Connected to an in-memory database");
             }
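One practical consequence of the `Arc<Db>` ownership, used by `test_storage_manager_cache_populated_by_batch_get` earlier in the patch: to get the owned database back out of a manager, every clone of the manager must be dropped first so the `Arc` becomes unique. Distilled into a sketch (illustrative only, not part of the patch; assumes `get_db()` is available as above):

    use std::sync::Arc;
    use akd::storage::memory::AsyncInMemoryDatabase;
    use akd::storage::StorageManager;

    fn sketch_reclaim_owned_database() {
        let manager = StorageManager::new_no_cache(AsyncInMemoryDatabase::new());

        // Hold a shared handle, then drop the manager so ours is the last strong reference.
        let db_arc = manager.get_db();
        drop(manager);

        // With a unique Arc, ownership of the database can be recovered, e.g. to build
        // a freshly configured manager (here: one with a cache enabled).
        let db = Arc::try_unwrap(db_arc).expect("other StorageManager clones still exist");
        let _manager_with_cache = StorageManager::new(db, None, None, None);
    }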