diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 74689b9..7c46da5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -57,9 +57,6 @@ jobs:
       - name: Test with db-int-key
         run: cargo test --lib --no-default-features --features "db-int-key" --verbose
 
-      - name: Test with LMDB enabled
-        run: cargo test --all --features "lmdb" --verbose
-
       - name: Test in release mode
         run: cargo test --release --all --verbose
diff --git a/Cargo.toml b/Cargo.toml
index 366435a..d88655d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,7 +11,7 @@ documentation = "https://docs.rs/rkv"
 edition = "2018"
 exclude = ["/tests/envs/*"]
 homepage = "https://github.com/mozilla/rkv"
-keywords = ["lmdb", "database", "storage"]
+keywords = ["database", "storage"]
 license = "Apache-2.0"
 name = "rkv"
 readme = "README.md"
@@ -19,37 +19,11 @@ repository = "https://github.com/mozilla/rkv"
 version = "0.19.0"
 rust-version = "1.66"
 
-[[bin]]
-name = "rand"
-path = "src/bin/rand.rs"
-required-features = ["lmdb"]
-
-[[bin]]
-name = "dump"
-path = "src/bin/dump.rs"
-required-features = ["lmdb"]
-
-[[test]]
-name = "env-all"
-required-features = ["lmdb"]
-
-[[test]]
-name = "env-lmdb"
-required-features = ["lmdb"]
-
-[[test]]
-name = "env-migration"
-required-features = ["lmdb"]
-
 [features]
-lmdb = ["lmdb-rkv"]
 db-dup-sort = []
 db-int-key = []
 default = ["db-dup-sort", "db-int-key"]
 no-canonicalize-path = []
-with-asan = ["lmdb", "lmdb-rkv/with-asan"]
-with-fuzzer = ["lmdb", "lmdb-rkv/with-fuzzer"]
-with-fuzzer-no-link = ["lmdb", "lmdb-rkv/with-fuzzer-no-link"]
 
 [dependencies]
 arrayref = "0.3"
@@ -58,7 +32,6 @@ bitflags = {version = "2.4.1", features = ["serde"]}
 byteorder = "1"
 id-arena = "2.2"
 lazy_static = "1.1"
-lmdb-rkv = { version = "0.14", optional = true }
 log = "0.4.20"
 ordered-float = "3.0.0"
 paste = "1.0.6"
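With the `lmdb` feature and its `with-asan`/`with-fuzzer` variants gone from the manifest above, a consumer's `Cargo.toml` only chooses among the remaining features. A minimal sketch of a downstream dependency entry (version number illustrative, not part of this patch):

```toml
# Hypothetical consumer Cargo.toml: the `lmdb` feature no longer exists,
# so the only remaining choice is whether to keep the default features.
# Opting out of `db-dup-sort` and `db-int-key` avoids their overhead:
[dependencies]
rkv = { version = "0.19", default-features = false }
```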
diff --git a/README.md b/README.md
index bc50d1f..3911e9f 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 [![Documentation](https://docs.rs/rkv/badge.svg)](https://docs.rs/rkv/)
 [![Crate](https://img.shields.io/crates/v/rkv.svg)](https://crates.io/crates/rkv)
 
-The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed key-value storage solution. It supports multiple backend engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for performance, or "SafeMode" for reliability.
+The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed key-value storage solution.
 
 ## ⚠️ Warning ⚠️
 
@@ -22,8 +22,6 @@ let shared_rkv = manager.get_or_create(path, Rkv::new::<SafeMode>).unwrap();
 
 The "SafeMode" backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk (only on commit).
 
-In the future, it will be advisable to switch to a different backend with better performance guarantees. We're working on either fixing some LMDB crashes, or offering more choices of backend engines (e.g. SQLite).
-
 ## Use
 
 Comprehensive information about using rkv is available in its [online documentation](https://docs.rs/rkv/), which can also be generated for local consumption:
 
@@ -46,8 +44,6 @@ There are several features that you can opt in to and out of when using rkv:
 
 By default, the `db-dup-sort` and `db-int-key` features offer high-level database APIs which allow multiple values per key, and optimizations around integer-based keys, respectively. Opt out of these default features when specifying the rkv dependency in your Cargo.toml file to disable them; doing so avoids a certain amount of overhead required to support them.
 
-To aid fuzzing efforts, `with-asan`, `with-fuzzer`, and `with-fuzzer-no-link` configure the build scripts responsible for compiling the underlying backing engines (e.g. LMDB) to build with these LLVM features enabled. Please refer to the official LLVM/Clang documentation on them for more information. These features are also disabled by default.
-
 ## Test
 
 Test this project as you would test other Rust crates:
diff --git a/src/backend.rs b/src/backend.rs
index 63fa08a..72dae28 100644
--- a/src/backend.rs
+++ b/src/backend.rs
@@ -9,26 +9,11 @@
 // specific language governing permissions and limitations under the License.
 
 mod common;
-#[cfg(feature = "lmdb")]
-mod impl_lmdb;
 mod impl_safe;
 mod traits;
 
 pub use common::*;
 pub use traits::*;
-
-#[cfg(feature = "lmdb")]
-pub use impl_lmdb::{
-    ArchMigrateError as LmdbArchMigrateError, ArchMigrateResult as LmdbArchMigrateResult,
-    ArchMigrator as LmdbArchMigrator, DatabaseFlagsImpl as LmdbDatabaseFlags,
-    DatabaseImpl as LmdbDatabase, EnvironmentBuilderImpl as Lmdb,
-    EnvironmentFlagsImpl as LmdbEnvironmentFlags, EnvironmentImpl as LmdbEnvironment,
-    ErrorImpl as LmdbError, InfoImpl as LmdbInfo, IterImpl as LmdbIter,
-    RoCursorImpl as LmdbRoCursor, RoTransactionImpl as LmdbRoTransaction,
-    RwCursorImpl as LmdbRwCursor, RwTransactionImpl as LmdbRwTransaction, StatImpl as LmdbStat,
-    WriteFlagsImpl as LmdbWriteFlags,
-};
-
 pub use impl_safe::{
     DatabaseFlagsImpl as SafeModeDatabaseFlags, DatabaseImpl as SafeModeDatabase,
     EnvironmentBuilderImpl as SafeMode, EnvironmentFlagsImpl as SafeModeEnvironmentFlags,
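After this change, `backend` exports only the SafeMode implementations, so a consumer opens an environment roughly as below. This is a sketch based on the crate's documented API (path and store name illustrative; it assumes the `./my-db` directory already exists):

```rust
use std::path::Path;

use rkv::backend::{SafeMode, SafeModeEnvironment};
use rkv::{Manager, Rkv, StoreOptions};

fn main() {
    // One shared environment per path, obtained through the singleton manager.
    let mut manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
    let shared_rkv = manager
        .get_or_create(Path::new("./my-db"), Rkv::new::<SafeMode>)
        .unwrap();
    let env = shared_rkv.read().unwrap();
    // A single-value store named "mydb" (name illustrative).
    let _store = env.open_single("mydb", StoreOptions::create()).unwrap();
}
```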
diff --git a/src/backend/impl_lmdb/arch_migrator.rs b/src/backend/impl_lmdb/arch_migrator.rs
deleted file mode 100644
index 7814260..0000000
--- a/src/backend/impl_lmdb/arch_migrator.rs
+++ /dev/null
@@ -1,998 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-#![allow(dead_code)] // TODO: Get rid of unused struct members
-#![allow(clippy::upper_case_acronyms)] // TODO: Consider renaming things like `BRANCH`
-
-//! A utility for migrating data from one LMDB environment to another. Notably, this tool
-//! can migrate data from an environment created with a different bit-depth than the
-//! current rkv consumer, which enables the consumer to retrieve data from an environment
-//! that can't be read directly using the rkv APIs.
-//!
-//! The utility supports both 32-bit and 64-bit LMDB source environments, and it
-//! automatically migrates data in both the default database and any named (sub)
-//! databases. It also migrates the source environment's "map size" and "max DBs"
-//! configuration options to the destination environment.
-//!
-//! The destination environment must be at the rkv consumer's bit depth and should be
-//! empty of data. It can be an empty directory, in which case the utility will create a
-//! new LMDB environment within the directory.
-//!
-//! The tool currently has these limitations:
-//!
-//! 1. It doesn't support migration from environments created with
-//!    `EnvironmentFlags::NO_SUB_DIR`. To migrate such an environment, create a
-//!    temporary directory, copy the environment's data file to a file called data.mdb in
-//!    the temporary directory, then migrate the temporary directory as the source
-//!    environment.
-//! 2. It doesn't support migration from databases created with `DatabaseFlags::DUP_SORT`
-//!    (with or without `DatabaseFlags::DUP_FIXED`).
-//! 3. It doesn't account for existing data in the destination environment, which means
-//!    that it can overwrite data (causing data loss) or fail to migrate data if the
-//!    destination environment contains existing data.
-//!
-//! ## Basic Usage
-//!
-//! Call `Migrator::new()` with the path to the source environment to create a `Migrator`
-//! instance; then call the instance's `migrate()` method with the path to the destination
-//! environment to migrate data from the source to the destination environment. For
-//! example, this snippet migrates data from the tests/envs/ref_env_32 environment to a
-//! new environment in a temporary directory:
-//!
-//! ```
-//! use rkv::migrator::LmdbArchMigrator as Migrator;
-//! use std::path::Path;
-//! use tempfile::tempdir;
-//! let mut migrator = Migrator::new(Path::new("tests/envs/ref_env_32")).unwrap();
-//! migrator.migrate(&tempdir().unwrap().path()).unwrap();
-//! ```
-//!
-//! Both `Migrator::new()` and `migrate()` return a `MigrateResult` that is either an
-//! `Ok()` result or an `Err<MigrateError>`, where `MigrateError` is an enum whose
-//! variants identify specific kinds of migration failures.
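Limitation 1 above describes a manual workaround; sketched in code it might look like the helper below, which is mine rather than the crate's (names and error handling illustrative):

```rust
use std::{fs, path::Path};

use rkv::migrator::LmdbArchMigrator as Migrator;
use tempfile::tempdir;

// Hypothetical helper: stage a NO_SUB_DIR environment's data file as
// `data.mdb` inside a temporary directory, then migrate that directory
// as the source environment.
fn migrate_no_sub_dir(env_file: &Path, dest: &Path) -> Result<(), Box<dyn std::error::Error>> {
    let staging = tempdir()?;
    fs::copy(env_file, staging.path().join("data.mdb"))?;
    let mut migrator = Migrator::new(staging.path())?;
    migrator.migrate(dest)?;
    Ok(())
}
```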
-
-use std::{
-    collections::{BTreeMap, HashMap},
-    convert::TryFrom,
-    fs::File,
-    io::{Cursor, Read, Seek, SeekFrom, Write},
-    path::{Path, PathBuf},
-    rc::Rc,
-    str,
-};
-
-use bitflags::bitflags;
-use byteorder::{LittleEndian, ReadBytesExt};
-use lmdb::{DatabaseFlags, Environment, Transaction, WriteFlags};
-
-pub use super::arch_migrator_error::MigrateError;
-
-const PAGESIZE: u16 = 4096;
-
-// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian. It appears at
-// offset 12 on 32-bit systems and 16 on 64-bit systems. We don't support big-endian
-// migration, but presumably we could do so by detecting the order of the bytes.
-const MAGIC: [u8; 4] = [0xDE, 0xC0, 0xEF, 0xBE];
-
-pub type MigrateResult<T> = Result<T, MigrateError>;
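A one-line sanity check of the byte order described in the comment above (an illustrative assertion of mine, not part of the original file):

```rust
fn main() {
    // 0xBEEFC0DE serialized little-endian is exactly the MAGIC byte sequence.
    assert_eq!(0xBEEF_C0DE_u32.to_le_bytes(), [0xDE, 0xC0, 0xEF, 0xBE]);
}
```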
-
-bitflags! {
-    #[derive(Default, PartialEq, Eq, Debug, Clone, Copy)]
-    struct PageFlags: u16 {
-        const BRANCH = 0x01;
-        const LEAF = 0x02;
-        const OVERFLOW = 0x04;
-        const META = 0x08;
-        const DIRTY = 0x10;
-        const LEAF2 = 0x20;
-        const SUBP = 0x40;
-        const LOOSE = 0x4000;
-        const KEEP = 0x8000;
-    }
-}
-
-bitflags! {
-    #[derive(Default, PartialEq, Eq, Debug, Clone, Copy)]
-    struct NodeFlags: u16 {
-        const BIGDATA = 0x01;
-        const SUBDATA = 0x02;
-        const DUPDATA = 0x04;
-    }
-}
-
-// The bit depth of the executable that created an LMDB environment. The Migrator
-// determines this automatically based on the location of the magic number in data.mdb.
-#[derive(Clone, Copy, PartialEq)]
-enum Bits {
-    U32,
-    U64,
-}
-
-impl Bits {
-    // The size of usize for the bit-depth represented by the enum variant.
-    fn size(self) -> usize {
-        match self {
-            Bits::U32 => 4,
-            Bits::U64 => 8,
-        }
-    }
-}
-
-// The equivalent of PAGEHDRSZ in LMDB, except that this one varies by bits.
-fn page_header_size(bits: Bits) -> u64 {
-    match bits {
-        Bits::U32 => 12,
-        Bits::U64 => 16,
-    }
-}
-
-// The equivalent of P_INVALID in LMDB, except that this one varies by bits.
-fn validate_page_num(page_num: u64, bits: Bits) -> MigrateResult<()> {
-    let invalid_page_num = match bits {
-        Bits::U32 => u64::from(!0u32),
-        Bits::U64 => !0u64,
-    };
-
-    if page_num == invalid_page_num {
-        return Err(MigrateError::InvalidPageNum);
-    }
-
-    Ok(())
-}
-
-#[derive(Clone, Debug, Default)]
-struct Database {
-    md_pad: u32,
-    md_flags: DatabaseFlags,
-    md_depth: u16,
-    md_branch_pages: u64,
-    md_leaf_pages: u64,
-    md_overflow_pages: u64,
-    md_entries: u64,
-    md_root: u64,
-}
-
-impl Database {
-    fn new(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<Database> {
-        Ok(Database {
-            md_pad: cursor.read_u32::<LittleEndian>()?,
-            md_flags: DatabaseFlags::from_bits(cursor.read_u16::<LittleEndian>()?.into())
-                .ok_or(MigrateError::InvalidDatabaseBits)?,
-            md_depth: cursor.read_u16::<LittleEndian>()?,
-            md_branch_pages: cursor.read_uint::<LittleEndian>(bits.size())?,
-            md_leaf_pages: cursor.read_uint::<LittleEndian>(bits.size())?,
-            md_overflow_pages: cursor.read_uint::<LittleEndian>(bits.size())?,
-            md_entries: cursor.read_uint::<LittleEndian>(bits.size())?,
-            md_root: cursor.read_uint::<LittleEndian>(bits.size())?,
-        })
-    }
-}
-
-#[derive(Debug, Default)]
-struct Databases {
-    free: Database,
-    main: Database,
-}
-
-#[derive(Debug, Default)]
-struct MetaData {
-    mm_magic: u32,
-    mm_version: u32,
-    mm_address: u64,
-    mm_mapsize: u64,
-    mm_dbs: Databases,
-    mm_last_pg: u64,
-    mm_txnid: u64,
-}
-
-#[derive(Debug)]
-enum LeafNode {
-    Regular {
-        mn_lo: u16,
-        mn_hi: u16,
-        mn_flags: NodeFlags,
-        mn_ksize: u16,
-        mv_size: u32,
-        key: Vec<u8>,
-        value: Vec<u8>,
-    },
-    BigData {
-        mn_lo: u16,
-        mn_hi: u16,
-        mn_flags: NodeFlags,
-        mn_ksize: u16,
-        mv_size: u32,
-        key: Vec<u8>,
-        overflow_pgno: u64,
-    },
-    SubData {
-        mn_lo: u16,
-        mn_hi: u16,
-        mn_flags: NodeFlags,
-        mn_ksize: u16,
-        mv_size: u32,
-        key: Vec<u8>,
-        value: Vec<u8>,
-        db: Database,
-    },
-}
-
-#[derive(Debug, Default)]
-struct BranchNode {
-    mp_pgno: u64,
-    mn_ksize: u16,
-    mn_data: Vec<u8>,
-}
-
-#[derive(Debug)]
-enum PageHeader {
-    Regular {
-        mp_pgno: u64,
-        mp_flags: PageFlags,
-        pb_lower: u16,
-        pb_upper: u16,
-    },
-    Overflow {
-        mp_pgno: u64,
-        mp_flags: PageFlags,
-        pb_pages: u32,
-    },
-}
-
-#[derive(Debug)]
-enum Page {
-    META(MetaData),
-    LEAF(Vec<LeafNode>),
-    BRANCH(Vec<BranchNode>),
-}
-
-impl Page {
-    fn new(buf: Vec<u8>, bits: Bits) -> MigrateResult<Page> {
-        let mut cursor = std::io::Cursor::new(&buf[..]);
-
-        match Self::parse_page_header(&mut cursor, bits)? {
-            PageHeader::Regular {
-                mp_flags, pb_lower, ..
-            } => {
-                if mp_flags.contains(PageFlags::LEAF2) || mp_flags.contains(PageFlags::SUBP) {
-                    // We don't yet support DUPFIXED and DUPSORT databases.
-                    return Err(MigrateError::UnsupportedPageHeaderVariant);
-                }
-
-                if mp_flags.contains(PageFlags::META) {
-                    let meta_data = Self::parse_meta_data(&mut cursor, bits)?;
-                    Ok(Page::META(meta_data))
-                } else if mp_flags.contains(PageFlags::LEAF) {
-                    let nodes = Self::parse_leaf_nodes(&mut cursor, pb_lower, bits)?;
-                    Ok(Page::LEAF(nodes))
-                } else if mp_flags.contains(PageFlags::BRANCH) {
-                    let nodes = Self::parse_branch_nodes(&mut cursor, pb_lower, bits)?;
-                    Ok(Page::BRANCH(nodes))
-                } else {
-                    Err(MigrateError::UnexpectedPageHeaderVariant)
-                }
-            }
-            PageHeader::Overflow { .. } => {
-                // There isn't anything to do, nor should we try to instantiate
-                // a page of this type, as we only access them when reading
-                // a value that is too large to fit into a leaf node.
-                Err(MigrateError::UnexpectedPageHeaderVariant)
-            }
-        }
-    }
-
-    fn parse_page_header(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<PageHeader> {
-        let mp_pgno = cursor.read_uint::<LittleEndian>(bits.size())?;
-        let _mp_pad = cursor.read_u16::<LittleEndian>()?;
-        let mp_flags = PageFlags::from_bits(cursor.read_u16::<LittleEndian>()?)
-            .ok_or(MigrateError::InvalidPageBits)?;
-
-        if mp_flags.contains(PageFlags::OVERFLOW) {
-            let pb_pages = cursor.read_u32::<LittleEndian>()?;
-            Ok(PageHeader::Overflow {
-                mp_pgno,
-                mp_flags,
-                pb_pages,
-            })
-        } else {
-            let pb_lower = cursor.read_u16::<LittleEndian>()?;
-            let pb_upper = cursor.read_u16::<LittleEndian>()?;
-            Ok(PageHeader::Regular {
-                mp_pgno,
-                mp_flags,
-                pb_lower,
-                pb_upper,
-            })
-        }
-    }
-
-    fn parse_meta_data(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<MetaData> {
-        cursor.seek(SeekFrom::Start(page_header_size(bits)))?;
-
-        Ok(MetaData {
-            mm_magic: cursor.read_u32::<LittleEndian>()?,
-            mm_version: cursor.read_u32::<LittleEndian>()?,
-            mm_address: cursor.read_uint::<LittleEndian>(bits.size())?,
-            mm_mapsize: cursor.read_uint::<LittleEndian>(bits.size())?,
-            mm_dbs: Databases {
-                free: Database::new(cursor, bits)?,
-                main: Database::new(cursor, bits)?,
-            },
-            mm_last_pg: cursor.read_uint::<LittleEndian>(bits.size())?,
-            mm_txnid: cursor.read_uint::<LittleEndian>(bits.size())?,
-        })
-    }
-
-    fn parse_leaf_nodes(
-        cursor: &mut Cursor<&[u8]>,
-        pb_lower: u16,
-        bits: Bits,
-    ) -> MigrateResult<Vec<LeafNode>> {
-        cursor.set_position(page_header_size(bits));
-        let num_keys = Self::num_keys(pb_lower, bits);
-        let mp_ptrs = Self::parse_mp_ptrs(cursor, num_keys)?;
-
-        let mut leaf_nodes = Vec::with_capacity(num_keys as usize);
-
-        for mp_ptr in mp_ptrs {
-            cursor.set_position(u64::from(mp_ptr));
-            leaf_nodes.push(Self::parse_leaf_node(cursor, bits)?);
-        }
-
-        Ok(leaf_nodes)
-    }
-
-    fn parse_leaf_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<LeafNode> {
-        // The order of the mn_lo and mn_hi fields is endian-dependent and would be
-        // reversed in an LMDB environment created on a big-endian system.
-        let mn_lo = cursor.read_u16::<LittleEndian>()?;
-        let mn_hi = cursor.read_u16::<LittleEndian>()?;
-
-        let mn_flags = NodeFlags::from_bits(cursor.read_u16::<LittleEndian>()?)
-            .ok_or(MigrateError::InvalidNodeBits)?;
-        let mn_ksize = cursor.read_u16::<LittleEndian>()?;
-
-        let start = usize::try_from(cursor.position())?;
-        let end = usize::try_from(cursor.position() + u64::from(mn_ksize))?;
-        let key = cursor.get_ref()[start..end].to_vec();
-        cursor.set_position(end as u64);
-
-        let mv_size = Self::leaf_node_size(mn_lo, mn_hi);
-        if mn_flags.contains(NodeFlags::BIGDATA) {
-            let overflow_pgno = cursor.read_uint::<LittleEndian>(bits.size())?;
-            Ok(LeafNode::BigData {
-                mn_lo,
-                mn_hi,
-                mn_flags,
-                mn_ksize,
-                mv_size,
-                key,
-                overflow_pgno,
-            })
-        } else if mn_flags.contains(NodeFlags::SUBDATA) {
-            let start = usize::try_from(cursor.position())?;
-            let end = usize::try_from(cursor.position() + u64::from(mv_size))?;
-            let value = cursor.get_ref()[start..end].to_vec();
-            let mut cursor = std::io::Cursor::new(&value[..]);
-            let db = Database::new(&mut cursor, bits)?;
-            validate_page_num(db.md_root, bits)?;
-            Ok(LeafNode::SubData {
-                mn_lo,
-                mn_hi,
-                mn_flags,
-                mn_ksize,
-                mv_size,
-                key,
-                value,
-                db,
-            })
-        } else {
-            let start = usize::try_from(cursor.position())?;
-            let end = usize::try_from(cursor.position() + u64::from(mv_size))?;
-            let value = cursor.get_ref()[start..end].to_vec();
-            Ok(LeafNode::Regular {
-                mn_lo,
-                mn_hi,
-                mn_flags,
-                mn_ksize,
-                mv_size,
-                key,
-                value,
-            })
-        }
-    }
-
-    fn leaf_node_size(mn_lo: u16, mn_hi: u16) -> u32 {
-        u32::from(mn_lo) + ((u32::from(mn_hi)) << 16)
-    }
-
-    fn parse_branch_nodes(
-        cursor: &mut Cursor<&[u8]>,
-        pb_lower: u16,
-        bits: Bits,
-    ) -> MigrateResult<Vec<BranchNode>> {
-        let num_keys = Self::num_keys(pb_lower, bits);
-        let mp_ptrs = Self::parse_mp_ptrs(cursor, num_keys)?;
-
-        let mut branch_nodes = Vec::with_capacity(num_keys as usize);
-
-        for mp_ptr in mp_ptrs {
-            cursor.set_position(u64::from(mp_ptr));
-            branch_nodes.push(Self::parse_branch_node(cursor, bits)?)
-        }
-
-        Ok(branch_nodes)
-    }
-
-    fn parse_branch_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<BranchNode> {
-        // The order of the mn_lo and mn_hi fields is endian-dependent and would be
-        // reversed in an LMDB environment created on a big-endian system.
-        let mn_lo = cursor.read_u16::<LittleEndian>()?;
-        let mn_hi = cursor.read_u16::<LittleEndian>()?;
-
-        let mn_flags = cursor.read_u16::<LittleEndian>()?;
-
-        // Branch nodes overload the mn_lo, mn_hi, and mn_flags fields to store the page
-        // number, so we derive the number from those fields.
-        let mp_pgno = Self::branch_node_page_num(mn_lo, mn_hi, mn_flags, bits);
-
-        let mn_ksize = cursor.read_u16::<LittleEndian>()?;
-
-        let position = cursor.position();
-        let start = usize::try_from(position)?;
-        let end = usize::try_from(position + u64::from(mn_ksize))?;
-        let mn_data = cursor.get_ref()[start..end].to_vec();
-        cursor.set_position(end as u64);
-
-        Ok(BranchNode {
-            mp_pgno,
-            mn_ksize,
-            mn_data,
-        })
-    }
-
-    fn branch_node_page_num(mn_lo: u16, mn_hi: u16, mn_flags: u16, bits: Bits) -> u64 {
-        let mut page_num = u64::from(u32::from(mn_lo) + (u32::from(mn_hi) << 16));
-        if bits == Bits::U64 {
-            page_num += u64::from(mn_flags) << 32;
-        }
-        page_num
-    }
-
-    fn parse_mp_ptrs(cursor: &mut Cursor<&[u8]>, num_keys: u64) -> MigrateResult<Vec<u16>> {
-        let mut mp_ptrs = Vec::with_capacity(num_keys as usize);
-        for _ in 0..num_keys {
-            mp_ptrs.push(cursor.read_u16::<LittleEndian>()?);
-        }
-        Ok(mp_ptrs)
-    }
-
-    fn num_keys(pb_lower: u16, bits: Bits) -> u64 {
-        (u64::from(pb_lower) - page_header_size(bits)) >> 1
-    }
-}
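A quick worked example of the lo/hi/flags packing in `branch_node_page_num` above, with illustrative values of my own choosing:

```rust
fn main() {
    // On a 64-bit source environment, mn_flags supplies bits 32..48.
    let (mn_lo, mn_hi, mn_flags) = (0x0001_u16, 0x0002_u16, 0x0003_u16);
    let mut page_num = u64::from(u32::from(mn_lo) + (u32::from(mn_hi) << 16));
    page_num += u64::from(mn_flags) << 32;
    assert_eq!(page_num, 0x0003_0002_0001);
}
```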
-
-pub struct Migrator {
-    file: File,
-    bits: Bits,
-}
-
-impl Migrator {
-    /// Create a new Migrator for the LMDB environment at the given path. This tries to
-    /// open the data.mdb file in the environment and determine the bit depth of the
-    /// executable that created it, so it can fail and return an Err if the file can't be
-    /// opened or the depth determined.
-    pub fn new(path: &Path) -> MigrateResult<Migrator> {
-        let mut path = PathBuf::from(path);
-        path.push("data.mdb");
-        let mut file = File::open(&path)?;
-
-        file.seek(SeekFrom::Start(page_header_size(Bits::U32)))?;
-        let mut buf = [0; 4];
-        file.read_exact(&mut buf)?;
-
-        let bits = if buf == MAGIC {
-            Bits::U32
-        } else {
-            file.seek(SeekFrom::Start(page_header_size(Bits::U64)))?;
-            file.read_exact(&mut buf)?;
-            if buf == MAGIC {
-                Bits::U64
-            } else {
-                return Err(MigrateError::IndeterminateBitDepth);
-            }
-        };
-
-        Ok(Migrator { file, bits })
-    }
-
-    /// Dump the data in one of the databases in the LMDB environment. If the `database`
-    /// parameter is None, then we dump the data in the main database. If it's the name
-    /// of a subdatabase, then we dump the data in that subdatabase.
-    ///
-    /// Note that the output isn't identical to that of the `mdb_dump` utility, since
-    /// `mdb_dump` includes subdatabase key/value pairs when dumping the main database,
-    /// and those values are architecture-dependent, since they contain pointer-sized
-    /// data.
-    ///
-    /// If we wanted to support identical output, we could parameterize inclusion of
-    /// subdatabase pairs in get_pairs() and include them when dumping data, while
-    /// continuing to exclude them when migrating data.
-    pub fn dump<T: Write>(&mut self, database: Option<&str>, mut out: T) -> MigrateResult<()> {
-        let meta_data = self.get_meta_data()?;
-        let root_page_num = meta_data.mm_dbs.main.md_root;
-        let root_page = Rc::new(self.get_page(root_page_num)?);
-
-        let pairs;
-        if let Some(database) = database {
-            let subdbs = self.get_subdbs(root_page)?;
-            let database = subdbs
-                .get(database.as_bytes())
-                .ok_or_else(|| MigrateError::DatabaseNotFound(database.to_string()))?;
-            let root_page_num = database.md_root;
-            let root_page = Rc::new(self.get_page(root_page_num)?);
-            pairs = self.get_pairs(root_page)?;
-        } else {
-            pairs = self.get_pairs(root_page)?;
-        }
-
-        out.write_all(b"VERSION=3\n")?;
-        out.write_all(b"format=bytevalue\n")?;
-        if let Some(database) = database {
-            writeln!(out, "database={database}")?;
-        }
-        out.write_all(b"type=btree\n")?;
-        writeln!(out, "mapsize={}", meta_data.mm_mapsize)?;
-        out.write_all(b"maxreaders=126\n")?;
-        out.write_all(b"db_pagesize=4096\n")?;
-        out.write_all(b"HEADER=END\n")?;
-
-        for (key, value) in pairs {
-            out.write_all(b" ")?;
-            for byte in key {
-                write!(out, "{byte:02x}")?;
-            }
-            out.write_all(b"\n")?;
-            out.write_all(b" ")?;
-            for byte in value {
-                write!(out, "{byte:02x}")?;
-            }
-            out.write_all(b"\n")?;
-        }
-
-        out.write_all(b"DATA=END\n")?;
-
-        Ok(())
-    }
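For orientation, a hedged sketch of driving `dump()`, with the header block it writes shown as comments (the path comes from the doc example above; the mapsize value depends on the source environment):

```rust
use std::path::Path;

use rkv::migrator::LmdbArchMigrator as Migrator;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Dump the main database of a reference environment to stdout.
    let mut migrator = Migrator::new(Path::new("tests/envs/ref_env_32"))?;
    migrator.dump(None, std::io::stdout())?;
    // The output starts with an mdb_dump-style header, roughly:
    //   VERSION=3
    //   format=bytevalue
    //   type=btree
    //   mapsize=1048576      (illustrative)
    //   maxreaders=126
    //   db_pagesize=4096
    //   HEADER=END
    // followed by hex-encoded key/value lines and a trailing DATA=END.
    Ok(())
}
```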
-
-    /// Migrate all data in all of the databases in the existing LMDB environment to a new
-    /// environment. This includes all key/value pairs in the main database that aren't
-    /// metadata about subdatabases and all key/value pairs in all subdatabases.
-    ///
-    /// We also set the map size and maximum databases of the new environment to their
-    /// values for the existing environment. But we don't set other metadata, and we
-    /// don't check that the new environment is empty before migrating data.
-    ///
-    /// Thus it's possible for this to overwrite existing data or fail to migrate data if
-    /// the new environment isn't empty. It's the consumer's responsibility to ensure
-    /// that data can be safely migrated to the new environment. In general, this means
-    /// that the new environment should be empty.
-    pub fn migrate(&mut self, dest: &Path) -> MigrateResult<()> {
-        let meta_data = self.get_meta_data()?;
-        let root_page_num = meta_data.mm_dbs.main.md_root;
-        validate_page_num(root_page_num, self.bits)?;
-        let root_page = Rc::new(self.get_page(root_page_num)?);
-        let subdbs = self.get_subdbs(Rc::clone(&root_page))?;
-
-        let env = Environment::new()
-            .set_map_size(meta_data.mm_mapsize as usize)
-            .set_max_dbs(subdbs.len() as u32)
-            .open(dest)?;
-
-        // Create the databases before we open a read-write transaction, since database
-        // creation requires its own read-write transaction, which would hang while
-        // awaiting completion of an existing one.
-        env.create_db(None, meta_data.mm_dbs.main.md_flags)?;
-        for (subdb_name, subdb_info) in &subdbs {
-            env.create_db(Some(str::from_utf8(subdb_name)?), subdb_info.md_flags)?;
-        }
-
-        // Now open the read-write transaction that we'll use to migrate all the data.
-        let mut txn = env.begin_rw_txn()?;
-
-        // Migrate the main database.
-        let pairs = self.get_pairs(root_page)?;
-        let db = env.open_db(None)?;
-        for (key, value) in pairs {
-            // If we knew that the target database was empty, we could specify
-            // WriteFlags::APPEND to speed up the migration.
-            txn.put(db, &key, &value, WriteFlags::empty())?;
-        }
-
-        // Migrate subdatabases.
-        for (subdb_name, subdb_info) in &subdbs {
-            let root_page = Rc::new(self.get_page(subdb_info.md_root)?);
-            let pairs = self.get_pairs(root_page)?;
-            let db = env.open_db(Some(str::from_utf8(subdb_name)?))?;
-            for (key, value) in pairs {
-                // If we knew that the target database was empty, we could specify
-                // WriteFlags::APPEND to speed up the migration.
-                txn.put(db, &key, &value, WriteFlags::empty())?;
-            }
-        }
-
-        txn.commit()?;
-
-        Ok(())
-    }
-
-    fn get_subdbs(&mut self, root_page: Rc<Page>) -> MigrateResult<HashMap<Vec<u8>, Database>> {
-        let mut subdbs = HashMap::new();
-        let mut pages = vec![root_page];
-
-        while let Some(page) = pages.pop() {
-            match &*page {
-                Page::BRANCH(nodes) => {
-                    for branch in nodes {
-                        pages.push(Rc::new(self.get_page(branch.mp_pgno)?));
-                    }
-                }
-                Page::LEAF(nodes) => {
-                    for leaf in nodes {
-                        if let LeafNode::SubData { key, db, .. } = leaf {
-                            subdbs.insert(key.to_vec(), db.clone());
-                        };
-                    }
-                }
-                _ => {
-                    return Err(MigrateError::UnexpectedPageVariant);
-                }
-            }
-        }
-
-        Ok(subdbs)
-    }
-
-    fn get_pairs(&mut self, root_page: Rc<Page>) -> MigrateResult<BTreeMap<Vec<u8>, Vec<u8>>> {
-        let mut pairs = BTreeMap::new();
-        let mut pages = vec![root_page];
-
-        while let Some(page) = pages.pop() {
-            match &*page {
-                Page::BRANCH(nodes) => {
-                    for branch in nodes {
-                        pages.push(Rc::new(self.get_page(branch.mp_pgno)?));
-                    }
-                }
-                Page::LEAF(nodes) => {
-                    for leaf in nodes {
-                        match leaf {
-                            LeafNode::Regular { key, value, .. } => {
-                                pairs.insert(key.to_vec(), value.to_vec());
-                            }
-                            LeafNode::BigData {
-                                mv_size,
-                                key,
-                                overflow_pgno,
-                                ..
-                            } => {
-                                // Perhaps we could reduce memory consumption during a
-                                // migration by waiting to read big data until it's time
-                                // to write it to the new database.
-                                let value = self.read_data(
-                                    *overflow_pgno * u64::from(PAGESIZE)
-                                        + page_header_size(self.bits),
-                                    *mv_size as usize,
-                                )?;
-                                pairs.insert(key.to_vec(), value);
-                            }
-                            LeafNode::SubData { .. } => {
-                                // We don't include subdatabase leaves in pairs, since
-                                // there's no architecture-neutral representation of them,
-                                // and in any case they're meta-data that should get
-                                // recreated when we migrate the subdatabases themselves.
-                                //
-                                // If we wanted to create identical dumps to those
-                                // produced by `mdb_dump`, however, we could allow
-                                // consumers to specify that they'd like to include these
-                                // records.
-                            }
-                        };
-                    }
-                }
-                _ => {
-                    return Err(MigrateError::UnexpectedPageVariant);
-                }
-            }
-        }
-
-        Ok(pairs)
-    }
-
-    fn read_data(&mut self, offset: u64, size: usize) -> MigrateResult<Vec<u8>> {
-        self.file.seek(SeekFrom::Start(offset))?;
-        let mut buf: Vec<u8> = vec![0; size];
-        self.file.read_exact(&mut buf[0..size])?;
-        Ok(buf.to_vec())
-    }
-
-    fn get_page(&mut self, page_no: u64) -> MigrateResult<Page> {
-        Page::new(
-            self.read_data(page_no * u64::from(PAGESIZE), usize::from(PAGESIZE))?,
-            self.bits,
-        )
-    }
-
-    fn get_meta_data(&mut self) -> MigrateResult<MetaData> {
-        let (page0, page1) = (self.get_page(0)?, self.get_page(1)?);
-
-        match (page0, page1) {
-            (Page::META(meta0), Page::META(meta1)) => {
-                let meta = if meta1.mm_txnid > meta0.mm_txnid {
-                    meta1
-                } else {
-                    meta0
-                };
-                if meta.mm_magic != 0xBE_EF_C0_DE {
-                    return Err(MigrateError::InvalidMagicNum);
-                }
-                if meta.mm_version != 1 && meta.mm_version != 999 {
-                    return Err(MigrateError::InvalidDataVersion);
-                }
-                Ok(meta)
-            }
-            _ => Err(MigrateError::UnexpectedPageVariant),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    use std::{env, fs, mem::size_of};
-
-    use lmdb::{Environment, Error as LmdbError};
-    use tempfile::{tempdir, tempfile};
-
-    fn compare_files(ref_file: &mut File, new_file: &mut File) -> MigrateResult<()> {
-        ref_file.seek(SeekFrom::Start(0))?;
-        new_file.seek(SeekFrom::Start(0))?;
-
-        let ref_buf = &mut [0; 1024];
-        let new_buf = &mut [0; 1024];
-
-        loop {
-            match ref_file.read(ref_buf) {
-                Err(err) => panic!("{}", err),
-                Ok(ref_len) => match new_file.read(new_buf) {
-                    Err(err) => panic!("{}", err),
-                    Ok(new_len) => {
-                        assert_eq!(ref_len, new_len);
-                        if ref_len == 0 {
-                            break;
-                        };
-                        assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]);
-                    }
-                },
-            }
-        }
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_dump_32() -> MigrateResult<()> {
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect();
-
-        // Dump data from the test env to a new dump file.
-        let mut migrator = Migrator::new(&test_env_path)?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(None, &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump.txt"].iter().collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_dump_32_subdb() -> MigrateResult<()> {
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect();
-
-        // Dump data from the test env to a new dump file.
-        let mut migrator = Migrator::new(&test_env_path)?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(Some("subdb"), &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"]
-            .iter()
-            .collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_dump_64() -> MigrateResult<()> {
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect();
-
-        // Dump data from the test env to a new dump file.
-        let mut migrator = Migrator::new(&test_env_path)?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(None, &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump.txt"].iter().collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_dump_64_subdb() -> MigrateResult<()> {
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect();
-
-        // Dump data from the test env to a new dump file.
-        let mut migrator = Migrator::new(&test_env_path)?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(Some("subdb"), &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"]
-            .iter()
-            .collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_migrate_64() -> MigrateResult<()> {
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect();
-
-        // Migrate data from the old env to a new one.
-        let new_env = tempdir()?;
-        let mut migrator = Migrator::new(&test_env_path)?;
-        migrator.migrate(new_env.path())?;
-
-        // Dump data from the new env to a new dump file.
-        let mut migrator = Migrator::new(new_env.path())?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(Some("subdb"), &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"]
-            .iter()
-            .collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_migrate_32() -> MigrateResult<()> {
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect();
-
-        // Migrate data from the old env to a new one.
-        let new_env = tempdir()?;
-        let mut migrator = Migrator::new(&test_env_path)?;
-        migrator.migrate(new_env.path())?;
-
-        // Dump data from the new env to a new dump file.
-        let mut migrator = Migrator::new(new_env.path())?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(Some("subdb"), &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"]
-            .iter()
-            .collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        Ok(())
-    }
-
-    #[test]
-    fn test_migrate_and_replace() -> MigrateResult<()> {
-        let test_env_name = match size_of::<usize>() {
-            4 => "ref_env_64",
-            8 => "ref_env_32",
-            _ => panic!("only 32- and 64-bit depths are supported"),
-        };
-
-        let cwd = env::current_dir()?;
-        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
-        let test_env_path: PathBuf = [cwd, "tests", "envs", test_env_name].iter().collect();
-
-        let old_env = tempdir()?;
-        fs::copy(
-            test_env_path.join("data.mdb"),
-            old_env.path().join("data.mdb"),
-        )?;
-        fs::copy(
-            test_env_path.join("lock.mdb"),
-            old_env.path().join("lock.mdb"),
-        )?;
-
-        // Confirm that it isn't possible to open the old environment with LMDB.
-        assert_eq!(
-            match Environment::new().open(old_env.path()) {
-                Err(err) => err,
-                _ => panic!("opening the environment should have failed"),
-            },
-            LmdbError::Invalid
-        );
-
-        // Migrate data from the old env to a new one.
-        let new_env = tempdir()?;
-        let mut migrator = Migrator::new(old_env.path())?;
-        migrator.migrate(new_env.path())?;
-
-        // Dump data from the new env to a new dump file.
-        let mut migrator = Migrator::new(new_env.path())?;
-        let mut new_dump_file = tempfile()?;
-        migrator.dump(Some("subdb"), &new_dump_file)?;
-
-        // Open the reference dump file.
-        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"]
-            .iter()
-            .collect();
-        let mut ref_dump_file = File::open(ref_dump_file_path)?;
-
-        // Compare the new dump file to the reference dump file.
-        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
-
-        // Overwrite the old env's files with the new env's files and confirm that it's now
-        // possible to open the old env with LMDB.
-        fs::copy(
-            new_env.path().join("data.mdb"),
-            old_env.path().join("data.mdb"),
-        )?;
-        fs::copy(
-            new_env.path().join("lock.mdb"),
-            old_env.path().join("lock.mdb"),
-        )?;
-        assert!(Environment::new().open(old_env.path()).is_ok());
-
-        Ok(())
-    }
-}
diff --git a/src/backend/impl_lmdb/arch_migrator_error.rs b/src/backend/impl_lmdb/arch_migrator_error.rs
deleted file mode 100644
index e23bb49..0000000
--- a/src/backend/impl_lmdb/arch_migrator_error.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use std::{io, num, str};
-
-use thiserror::Error;
-
-#[derive(Debug, Error)]
-pub enum MigrateError {
-    #[error("database not found: {0:?}")]
-    DatabaseNotFound(String),
-
-    #[error("{0}")]
-    FromString(String),
-
-    #[error("couldn't determine bit depth")]
-    IndeterminateBitDepth,
-
-    #[error("I/O error: {0:?}")]
-    IoError(#[from] io::Error),
-
-    #[error("invalid DatabaseFlags bits")]
-    InvalidDatabaseBits,
-
-    #[error("invalid data version")]
-    InvalidDataVersion,
-
-    #[error("invalid magic number")]
-    InvalidMagicNum,
-
-    #[error("invalid NodeFlags bits")]
-    InvalidNodeBits,
-
-    #[error("invalid PageFlags bits")]
-    InvalidPageBits,
-
-    #[error("invalid page number")]
-    InvalidPageNum,
-
-    #[error("lmdb backend error: {0}")]
-    LmdbError(#[from] lmdb::Error),
-
-    #[error("string conversion error")]
-    StringConversionError,
-
-    #[error("TryFromInt error: {0:?}")]
-    TryFromIntError(#[from] num::TryFromIntError),
-
-    #[error("unexpected Page variant")]
-    UnexpectedPageVariant,
-
-    #[error("unexpected PageHeader variant")]
-    UnexpectedPageHeaderVariant,
-
-    #[error("unsupported PageHeader variant")]
-    UnsupportedPageHeaderVariant,
-
-    #[error("UTF8 error: {0:?}")]
-    Utf8Error(#[from] str::Utf8Error),
-}
-
-impl From<&str> for MigrateError {
-    fn from(e: &str) -> MigrateError {
-        MigrateError::FromString(e.to_string())
-    }
-}
-
-impl From<String> for MigrateError {
-    fn from(e: String) -> MigrateError {
-        MigrateError::FromString(e)
-    }
-}
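Both `From` conversions above funnel into the `FromString` variant, so the two construction routes below produce the same display output. An illustrative check (the import path uses the `LmdbArchMigrateError` re-export that the `backend.rs` diff above removes):

```rust
use rkv::backend::LmdbArchMigrateError as MigrateError;

fn main() {
    let from_str = MigrateError::from("unexpected header");
    let from_string = MigrateError::from(String::from("unexpected header"));
    // FromString's Display (via thiserror's `#[error("{0}")]`) is the message.
    assert_eq!(from_str.to_string(), from_string.to_string());
    assert_eq!(from_str.to_string(), "unexpected header");
}
```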
diff --git a/src/backend/impl_lmdb/cursor.rs b/src/backend/impl_lmdb/cursor.rs
deleted file mode 100644
index 760abce..0000000
--- a/src/backend/impl_lmdb/cursor.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use lmdb::Cursor;
-
-use super::IterImpl;
-use crate::backend::traits::BackendRoCursor;
-
-#[derive(Debug)]
-pub struct RoCursorImpl<'c>(pub(crate) lmdb::RoCursor<'c>);
-
-impl<'c> BackendRoCursor<'c> for RoCursorImpl<'c> {
-    type Iter = IterImpl<'c, lmdb::RoCursor<'c>>;
-
-    fn into_iter(self) -> Self::Iter {
-        // We call RoCursor.iter() instead of RoCursor.iter_start() because
-        // the latter panics when there are no items in the store, whereas the
-        // former returns an iterator that yields no items. And since we create
-        // the Cursor and don't change its position, we can be sure that a call
-        // to Cursor.iter() will start at the beginning.
-        IterImpl::new(self.0, lmdb::RoCursor::iter)
-    }
-
-    fn into_iter_from<K>(self, key: K) -> Self::Iter
-    where
-        K: AsRef<[u8]> + 'c,
-    {
-        IterImpl::new(self.0, |cursor| cursor.iter_from(key))
-    }
-
-    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
-    where
-        K: AsRef<[u8]> + 'c,
-    {
-        IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key))
-    }
-}
-
-#[derive(Debug)]
-pub struct RwCursorImpl<'c>(pub(crate) lmdb::RwCursor<'c>);
-
-impl<'c> BackendRoCursor<'c> for RwCursorImpl<'c> {
-    type Iter = IterImpl<'c, lmdb::RwCursor<'c>>;
-
-    fn into_iter(self) -> Self::Iter {
-        IterImpl::new(self.0, lmdb::RwCursor::iter)
-    }
-
-    fn into_iter_from<K>(self, key: K) -> Self::Iter
-    where
-        K: AsRef<[u8]> + 'c,
-    {
-        IterImpl::new(self.0, |cursor| cursor.iter_from(key))
-    }
-
-    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
-    where
-        K: AsRef<[u8]> + 'c,
-    {
-        IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key))
-    }
-}
diff --git a/src/backend/impl_lmdb/database.rs b/src/backend/impl_lmdb/database.rs
deleted file mode 100644
index 8edee5c..0000000
--- a/src/backend/impl_lmdb/database.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use crate::backend::traits::BackendDatabase;
-
-#[derive(Debug, Eq, PartialEq, Copy, Clone)]
-pub struct DatabaseImpl(pub(crate) lmdb::Database);
-
-impl BackendDatabase for DatabaseImpl {}
diff --git a/src/backend/impl_lmdb/environment.rs b/src/backend/impl_lmdb/environment.rs
deleted file mode 100644
index a8a72ae..0000000
--- a/src/backend/impl_lmdb/environment.rs
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use std::{
-    fs,
-    path::{Path, PathBuf},
-};
-
-use lmdb::Error as LmdbError;
-
-use super::{
-    DatabaseFlagsImpl, DatabaseImpl, EnvironmentFlagsImpl, ErrorImpl, InfoImpl, RoTransactionImpl,
-    RwTransactionImpl, StatImpl,
-};
-use crate::backend::common::RecoveryStrategy;
-use crate::backend::traits::{
-    BackendEnvironment, BackendEnvironmentBuilder, BackendInfo, BackendIter, BackendRoCursor,
-    BackendRoCursorTransaction, BackendStat,
-};
-
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub struct EnvironmentBuilderImpl {
-    builder: lmdb::EnvironmentBuilder,
-    env_path_type: EnvironmentPathType,
-    env_lock_type: EnvironmentLockType,
-    env_db_type: EnvironmentDefaultDbType,
-    make_dir_if_needed: bool,
-}
-
-impl<'b> BackendEnvironmentBuilder<'b> for EnvironmentBuilderImpl {
-    type Environment = EnvironmentImpl;
-    type Error = ErrorImpl;
-    type Flags = EnvironmentFlagsImpl;
-
-    fn new() -> EnvironmentBuilderImpl {
-        EnvironmentBuilderImpl {
-            builder: lmdb::Environment::new(),
-            env_path_type: EnvironmentPathType::SubDir,
-            env_lock_type: EnvironmentLockType::Lockfile,
-            env_db_type: EnvironmentDefaultDbType::SingleDatabase,
-            make_dir_if_needed: false,
-        }
-    }
-
-    fn set_flags<T>(&mut self, flags: T) -> &mut Self
-    where
-        T: Into<Self::Flags>,
-    {
-        let flags = flags.into();
-        if flags.0 == lmdb::EnvironmentFlags::NO_SUB_DIR {
-            self.env_path_type = EnvironmentPathType::NoSubDir;
-        }
-        if flags.0 == lmdb::EnvironmentFlags::NO_LOCK {
-            self.env_lock_type = EnvironmentLockType::NoLockfile;
-        }
-        self.builder.set_flags(flags.0);
-        self
-    }
-
-    fn set_max_readers(&mut self, max_readers: u32) -> &mut Self {
-        self.builder.set_max_readers(max_readers);
-        self
-    }
-
-    fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self {
-        if max_dbs > 0 {
-            self.env_db_type = EnvironmentDefaultDbType::MultipleNamedDatabases
-        }
-        self.builder.set_max_dbs(max_dbs);
-        self
-    }
-
-    fn set_map_size(&mut self, size: usize) -> &mut Self {
-        self.builder.set_map_size(size);
-        self
-    }
-
-    fn set_make_dir_if_needed(&mut self, make_dir_if_needed: bool) -> &mut Self {
-        self.make_dir_if_needed = make_dir_if_needed;
-        self
-    }
-
-    /// **UNIMPLEMENTED.** Will panic at runtime.
-    fn set_corruption_recovery_strategy(&mut self, _strategy: RecoveryStrategy) -> &mut Self {
-        // Unfortunately, when opening a database, LMDB doesn't handle all the ways it could have
-        // been corrupted. Prefer using the `SafeMode` backend if this is important.
-        unimplemented!();
-    }
-
-    fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
-        match self.env_path_type {
-            EnvironmentPathType::NoSubDir => {
-                if !path.is_file() {
-                    return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
-                }
-            }
-            EnvironmentPathType::SubDir => {
-                if !path.is_dir() {
-                    if !self.make_dir_if_needed {
-                        return Err(ErrorImpl::UnsuitableEnvironmentPath(path.into()));
-                    }
-                    fs::create_dir_all(path)?;
-                }
-            }
-        }
-
-        self.builder
-            .open(path)
-            .map_err(ErrorImpl::LmdbError)
-            .and_then(|lmdbenv| {
-                EnvironmentImpl::new(
-                    path,
-                    self.env_path_type,
-                    self.env_lock_type,
-                    self.env_db_type,
-                    lmdbenv,
-                )
-            })
-    }
-}
-
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub enum EnvironmentPathType {
-    SubDir,
-    NoSubDir,
-}
-
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub enum EnvironmentLockType {
-    Lockfile,
-    NoLockfile,
-}
-
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub enum EnvironmentDefaultDbType {
-    SingleDatabase,
-    MultipleNamedDatabases,
-}
-
-#[derive(Debug)]
-pub struct EnvironmentImpl {
-    path: PathBuf,
-    env_path_type: EnvironmentPathType,
-    env_lock_type: EnvironmentLockType,
-    env_db_type: EnvironmentDefaultDbType,
-    lmdbenv: lmdb::Environment,
-}
-
-impl EnvironmentImpl {
-    pub(crate) fn new(
-        path: &Path,
-        env_path_type: EnvironmentPathType,
-        env_lock_type: EnvironmentLockType,
-        env_db_type: EnvironmentDefaultDbType,
-        lmdbenv: lmdb::Environment,
-    ) -> Result<EnvironmentImpl, ErrorImpl> {
-        Ok(EnvironmentImpl {
-            path: path.to_path_buf(),
-            env_path_type,
-            env_lock_type,
-            env_db_type,
-            lmdbenv,
-        })
-    }
-}
-
-impl<'e> BackendEnvironment<'e> for EnvironmentImpl {
-    type Database = DatabaseImpl;
-    type Error = ErrorImpl;
-    type Flags = DatabaseFlagsImpl;
-    type Info = InfoImpl;
-    type RoTransaction = RoTransactionImpl<'e>;
-    type RwTransaction = RwTransactionImpl<'e>;
-    type Stat = StatImpl;
-
-    fn get_dbs(&self) -> Result<Vec<Option<String>>, Self::Error> {
-        if self.env_db_type == EnvironmentDefaultDbType::SingleDatabase {
-            return Ok(vec![None]);
-        }
-        let db = self
-            .lmdbenv
-            .open_db(None)
-            .map(DatabaseImpl)
-            .map_err(ErrorImpl::LmdbError)?;
-        let reader = self.begin_ro_txn()?;
-        let cursor = reader.open_ro_cursor(&db)?;
-        let mut iter = cursor.into_iter();
-        let mut store = vec![];
-        while let Some(result) = iter.next() {
-            let (key, _) = result?;
-            let name = String::from_utf8(key.to_owned())
-                .map_err(|_| ErrorImpl::LmdbError(lmdb::Error::Corrupted))?;
-            store.push(Some(name));
-        }
-        Ok(store)
-    }
-
-    fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
-        self.lmdbenv
-            .open_db(name)
-            .map(DatabaseImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn create_db(
-        &self,
-        name: Option<&str>,
-        flags: Self::Flags,
-    ) -> Result<Self::Database, Self::Error> {
-        self.lmdbenv
-            .create_db(name, flags.0)
-            .map(DatabaseImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn begin_ro_txn(&'e self) -> Result<Self::RoTransaction, Self::Error> {
-        self.lmdbenv
-            .begin_ro_txn()
-            .map(RoTransactionImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn begin_rw_txn(&'e self) -> Result<Self::RwTransaction, Self::Error> {
-        self.lmdbenv
-            .begin_rw_txn()
-            .map(RwTransactionImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn sync(&self, force: bool) -> Result<(), Self::Error> {
-        self.lmdbenv.sync(force).map_err(ErrorImpl::LmdbError)
-    }
-
-    fn stat(&self) -> Result<Self::Stat, Self::Error> {
-        self.lmdbenv
-            .stat()
-            .map(StatImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn info(&self) -> Result<Self::Info, Self::Error> {
-        self.lmdbenv
-            .info()
-            .map(InfoImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn freelist(&self) -> Result<usize, Self::Error> {
-        self.lmdbenv.freelist().map_err(ErrorImpl::LmdbError)
-    }
-
-    fn load_ratio(&self) -> Result<Option<f32>, Self::Error> {
-        let stat = self.stat()?;
-        let info = self.info()?;
-        let freelist = self.freelist()?;
-
-        let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
-        let total_pgs = info.map_size() / stat.page_size();
-        if freelist > last_pgno {
-            return Err(ErrorImpl::LmdbError(LmdbError::Corrupted));
-        }
-        let used_pgs = last_pgno - freelist;
-        Ok(Some(used_pgs as f32 / total_pgs as f32))
-    }
-
-    fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
-        self.lmdbenv
-            .set_map_size(size)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    fn get_files_on_disk(&self) -> Vec<PathBuf> {
-        let mut store = vec![];
-
-        if self.env_path_type == EnvironmentPathType::NoSubDir {
-            // The option NO_SUB_DIR could change the default directory layout; therefore this should
-            // probably return the path used to create environment, along with the custom lockfile
-            // when available.
-            unimplemented!();
-        }
-
-        let mut db_filename = self.path.clone();
-        db_filename.push("data.mdb");
-        store.push(db_filename);
-
-        if self.env_lock_type == EnvironmentLockType::Lockfile {
-            let mut lock_filename = self.path.clone();
-            lock_filename.push("lock.mdb");
-            store.push(lock_filename);
-        }
-
-        store
-    }
-}
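Worked numbers for the arithmetic in `load_ratio` above (illustrative values of my own, chosen as powers of two so the float comparison is exact):

```rust
fn main() {
    // map_size = 1 MiB, page_size = 4 KiB  => total_pgs = 256
    // last_pgno() = 99 (zero-based)        => last_pgno = 100
    // freelist = 20 free pages             => used_pgs = 80
    let (total_pgs, last_pgno, freelist) = (256_u64, 100_u64, 20_u64);
    let used_pgs = last_pgno - freelist;
    assert_eq!(used_pgs as f32 / total_pgs as f32, 0.3125);
}
```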
diff --git a/src/backend/impl_lmdb/error.rs b/src/backend/impl_lmdb/error.rs
deleted file mode 100644
index a2fd8a7..0000000
--- a/src/backend/impl_lmdb/error.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use std::{fmt, io, path::PathBuf};
-
-use crate::{backend::traits::BackendError, error::StoreError};
-
-#[derive(Debug)]
-pub enum ErrorImpl {
-    LmdbError(lmdb::Error),
-    UnsuitableEnvironmentPath(PathBuf),
-    IoError(io::Error),
-}
-
-impl BackendError for ErrorImpl {}
-
-impl fmt::Display for ErrorImpl {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            ErrorImpl::LmdbError(e) => e.fmt(fmt),
-            ErrorImpl::UnsuitableEnvironmentPath(_) => write!(fmt, "UnsuitableEnvironmentPath"),
-            ErrorImpl::IoError(e) => e.fmt(fmt),
-        }
-    }
-}
-
-impl Into<StoreError> for ErrorImpl {
-    fn into(self) -> StoreError {
-        match self {
-            ErrorImpl::LmdbError(lmdb::Error::Corrupted) => StoreError::DatabaseCorrupted,
-            ErrorImpl::LmdbError(lmdb::Error::NotFound) => StoreError::KeyValuePairNotFound,
-            ErrorImpl::LmdbError(lmdb::Error::BadValSize) => StoreError::KeyValuePairBadSize,
-            ErrorImpl::LmdbError(lmdb::Error::Invalid) => StoreError::FileInvalid,
-            ErrorImpl::LmdbError(lmdb::Error::MapFull) => StoreError::MapFull,
-            ErrorImpl::LmdbError(lmdb::Error::DbsFull) => StoreError::DbsFull,
-            ErrorImpl::LmdbError(lmdb::Error::ReadersFull) => StoreError::ReadersFull,
-            ErrorImpl::LmdbError(error) => StoreError::LmdbError(error),
-            ErrorImpl::UnsuitableEnvironmentPath(path) => {
-                StoreError::UnsuitableEnvironmentPath(path)
-            }
-            ErrorImpl::IoError(error) => StoreError::IoError(error),
-        }
-    }
-}
-
-impl From<io::Error> for ErrorImpl {
-    fn from(e: io::Error) -> ErrorImpl {
-        ErrorImpl::IoError(e)
-    }
-}
diff --git a/src/backend/impl_lmdb/flags.rs b/src/backend/impl_lmdb/flags.rs
deleted file mode 100644
index 7a16bb7..0000000
--- a/src/backend/impl_lmdb/flags.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use crate::backend::{
-    common::{DatabaseFlags, EnvironmentFlags, WriteFlags},
-    traits::{BackendDatabaseFlags, BackendEnvironmentFlags, BackendFlags, BackendWriteFlags},
-};
-
-#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
-pub struct EnvironmentFlagsImpl(pub(crate) lmdb::EnvironmentFlags);
-
-impl BackendFlags for EnvironmentFlagsImpl {
-    fn empty() -> EnvironmentFlagsImpl {
-        EnvironmentFlagsImpl(lmdb::EnvironmentFlags::empty())
-    }
-}
-
-impl BackendEnvironmentFlags for EnvironmentFlagsImpl {
-    fn set(&mut self, flag: EnvironmentFlags, value: bool) {
-        self.0.set(flag.into(), value)
-    }
-}
-
-impl Into<EnvironmentFlagsImpl> for EnvironmentFlags {
-    fn into(self) -> EnvironmentFlagsImpl {
-        EnvironmentFlagsImpl(self.into())
-    }
-}
-
-impl Into<lmdb::EnvironmentFlags> for EnvironmentFlags {
-    fn into(self) -> lmdb::EnvironmentFlags {
-        match self {
-            EnvironmentFlags::FIXED_MAP => lmdb::EnvironmentFlags::FIXED_MAP,
-            EnvironmentFlags::NO_SUB_DIR => lmdb::EnvironmentFlags::NO_SUB_DIR,
-            EnvironmentFlags::WRITE_MAP => lmdb::EnvironmentFlags::WRITE_MAP,
-            EnvironmentFlags::READ_ONLY => lmdb::EnvironmentFlags::READ_ONLY,
-            EnvironmentFlags::NO_META_SYNC => lmdb::EnvironmentFlags::NO_META_SYNC,
-            EnvironmentFlags::NO_SYNC => lmdb::EnvironmentFlags::NO_SYNC,
-            EnvironmentFlags::MAP_ASYNC => lmdb::EnvironmentFlags::MAP_ASYNC,
-            EnvironmentFlags::NO_TLS => lmdb::EnvironmentFlags::NO_TLS,
-            EnvironmentFlags::NO_LOCK => lmdb::EnvironmentFlags::NO_LOCK,
-            EnvironmentFlags::NO_READAHEAD => lmdb::EnvironmentFlags::NO_READAHEAD,
-            EnvironmentFlags::NO_MEM_INIT => lmdb::EnvironmentFlags::NO_MEM_INIT,
-        }
-    }
-}
-
-#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
-pub struct DatabaseFlagsImpl(pub(crate) lmdb::DatabaseFlags);
-
-impl BackendFlags for DatabaseFlagsImpl {
-    fn empty() -> DatabaseFlagsImpl {
-        DatabaseFlagsImpl(lmdb::DatabaseFlags::empty())
-    }
-}
-
-impl BackendDatabaseFlags for DatabaseFlagsImpl {
-    fn set(&mut self, flag: DatabaseFlags, value: bool) {
-        self.0.set(flag.into(), value)
-    }
-}
-
-impl Into<DatabaseFlagsImpl> for DatabaseFlags {
-    fn into(self) -> DatabaseFlagsImpl {
-        DatabaseFlagsImpl(self.into())
-    }
-}
-
-impl Into<lmdb::DatabaseFlags> for DatabaseFlags {
-    fn into(self) -> lmdb::DatabaseFlags {
-        match self {
-            DatabaseFlags::REVERSE_KEY => lmdb::DatabaseFlags::REVERSE_KEY,
-            #[cfg(feature = "db-dup-sort")]
-            DatabaseFlags::DUP_SORT => lmdb::DatabaseFlags::DUP_SORT,
-            #[cfg(feature = "db-dup-sort")]
-            DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED,
-            #[cfg(feature = "db-int-key")]
-            DatabaseFlags::INTEGER_KEY => lmdb::DatabaseFlags::INTEGER_KEY,
-            DatabaseFlags::INTEGER_DUP => lmdb::DatabaseFlags::INTEGER_DUP,
-            DatabaseFlags::REVERSE_DUP => lmdb::DatabaseFlags::REVERSE_DUP,
-        }
-    }
-}
-
-#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
-pub struct WriteFlagsImpl(pub(crate) lmdb::WriteFlags);
-
-impl BackendFlags for WriteFlagsImpl {
-    fn empty() -> WriteFlagsImpl {
-        WriteFlagsImpl(lmdb::WriteFlags::empty())
-    }
-}
-
-impl BackendWriteFlags for WriteFlagsImpl {
-    fn set(&mut self, flag: WriteFlags, value: bool) {
-        self.0.set(flag.into(), value)
-    }
-}
-
-impl Into<WriteFlagsImpl> for WriteFlags {
-    fn into(self) -> WriteFlagsImpl {
-        WriteFlagsImpl(self.into())
-    }
-}
-
-impl Into<lmdb::WriteFlags> for WriteFlags {
-    fn into(self) -> lmdb::WriteFlags {
-        match self {
-            WriteFlags::NO_OVERWRITE => lmdb::WriteFlags::NO_OVERWRITE,
-            WriteFlags::NO_DUP_DATA => lmdb::WriteFlags::NO_DUP_DATA,
-            WriteFlags::CURRENT => lmdb::WriteFlags::CURRENT,
-            WriteFlags::APPEND => lmdb::WriteFlags::APPEND,
-            WriteFlags::APPEND_DUP => lmdb::WriteFlags::APPEND_DUP,
-        }
-    }
-}
diff --git a/src/backend/impl_lmdb/info.rs b/src/backend/impl_lmdb/info.rs
deleted file mode 100644
index 6188065..0000000
--- a/src/backend/impl_lmdb/info.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use crate::backend::traits::BackendInfo;
-
-pub struct InfoImpl(pub(crate) lmdb::Info);
-
-impl BackendInfo for InfoImpl {
-    fn map_size(&self) -> usize {
-        self.0.map_size()
-    }
-
-    fn last_pgno(&self) -> usize {
-        self.0.last_pgno()
-    }
-
-    fn last_txnid(&self) -> usize {
-        self.0.last_txnid()
-    }
-
-    fn max_readers(&self) -> usize {
-        self.0.max_readers() as usize
-    }
-
-    fn num_readers(&self) -> usize {
-        self.0.num_readers() as usize
-    }
-}
diff --git a/src/backend/impl_lmdb/iter.rs b/src/backend/impl_lmdb/iter.rs
deleted file mode 100644
index 519d361..0000000
--- a/src/backend/impl_lmdb/iter.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use super::ErrorImpl;
-use crate::backend::traits::BackendIter;
-
-pub struct IterImpl<'i, C> {
-    // LMDB semantics dictate that a cursor must be valid for the entire lifetime
-    // of an iterator. In other words, cursors must not be dropped while an
-    // iterator built from it is alive. Unfortunately, the LMDB crate API does
-    // not express this through the type system, so we must enforce it somehow.
-    #[allow(dead_code)]
-    cursor: C,
-    iter: lmdb::Iter<'i>,
-}
-
-impl<'i, C> IterImpl<'i, C> {
-    pub(crate) fn new(
-        mut cursor: C,
-        to_iter: impl FnOnce(&mut C) -> lmdb::Iter<'i>,
-    ) -> IterImpl<'i, C> {
-        let iter = to_iter(&mut cursor);
-        IterImpl { cursor, iter }
-    }
-}
-
-impl<'i, C> BackendIter<'i> for IterImpl<'i, C> {
-    type Error = ErrorImpl;
-
-    #[allow(clippy::type_complexity)]
-    fn next(&mut self) -> Option<Result<(&'i [u8], &'i [u8]), Self::Error>> {
-        self.iter.next().map(|e| e.map_err(ErrorImpl::LmdbError))
-    }
-}
diff --git a/src/backend/impl_lmdb/stat.rs b/src/backend/impl_lmdb/stat.rs
deleted file mode 100644
index b0de8c5..0000000
--- a/src/backend/impl_lmdb/stat.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use crate::backend::traits::BackendStat;
-
-pub struct StatImpl(pub(crate) lmdb::Stat);
-
-impl BackendStat for StatImpl {
-    fn page_size(&self) -> usize {
-        self.0.page_size() as usize
-    }
-
-    fn depth(&self) -> usize {
-        self.0.depth() as usize
-    }
-
-    fn branch_pages(&self) -> usize {
-        self.0.branch_pages()
-    }
-
-    fn leaf_pages(&self) -> usize {
-        self.0.leaf_pages()
-    }
-
-    fn overflow_pages(&self) -> usize {
-        self.0.overflow_pages()
-    }
-
-    fn entries(&self) -> usize {
-        self.0.entries()
-    }
-}
diff --git a/src/backend/impl_lmdb/transaction.rs b/src/backend/impl_lmdb/transaction.rs
deleted file mode 100644
index 2175276..0000000
--- a/src/backend/impl_lmdb/transaction.rs
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use lmdb::Transaction;
-
-use super::{DatabaseImpl, ErrorImpl, RoCursorImpl, WriteFlagsImpl};
-use crate::backend::traits::{
-    BackendRoCursorTransaction, BackendRoTransaction, BackendRwCursorTransaction,
-    BackendRwTransaction,
-};
-
-#[derive(Debug)]
-pub struct RoTransactionImpl<'t>(pub(crate) lmdb::RoTransaction<'t>);
-
-impl<'t> BackendRoTransaction for RoTransactionImpl<'t> {
-    type Database = DatabaseImpl;
-    type Error = ErrorImpl;
-
-    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
-        self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError)
-    }
-
-    fn abort(self) {
-        self.0.abort()
-    }
-}
-
-impl<'t> BackendRoCursorTransaction<'t> for RoTransactionImpl<'t> {
-    type RoCursor = RoCursorImpl<'t>;
-
-    fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
-        self.0
-            .open_ro_cursor(db.0)
-            .map(RoCursorImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-}
-
-#[derive(Debug)]
-pub struct RwTransactionImpl<'t>(pub(crate) lmdb::RwTransaction<'t>);
-
-impl<'t> BackendRwTransaction for RwTransactionImpl<'t> {
-    type Database = DatabaseImpl;
-    type Error = ErrorImpl;
-    type Flags = WriteFlagsImpl;
-
-    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
-        self.0.get(db.0, &key).map_err(ErrorImpl::LmdbError)
-    }
-
-    fn put(
-        &mut self,
-        db: &Self::Database,
-        key: &[u8],
-        value: &[u8],
-        flags: Self::Flags,
-    ) -> Result<(), Self::Error> {
-        self.0
-            .put(db.0, &key, &value, flags.0)
-            .map_err(ErrorImpl::LmdbError)
-    }
-
-    #[cfg(not(feature = "db-dup-sort"))]
-    fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> {
-        self.0.del(db.0, &key, None).map_err(ErrorImpl::LmdbError)
-    }
-
-    #[cfg(feature = "db-dup-sort")]
-    fn del(
-        &mut self,
-        db: &Self::Database,
-        key: &[u8],
-        value: Option<&[u8]>,
-    ) -> Result<(), Self::Error> {
-        self.0.del(db.0, &key, value).map_err(ErrorImpl::LmdbError)
-    }
-
-    fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> {
-        self.0.clear_db(db.0).map_err(ErrorImpl::LmdbError)
-    }
-
-    fn commit(self) -> Result<(), Self::Error> {
-        self.0.commit().map_err(ErrorImpl::LmdbError)
-    }
-
-    fn abort(self) {
-        self.0.abort()
-    }
-}
-
-impl<'t> BackendRwCursorTransaction<'t> for RwTransactionImpl<'t> {
-    type RoCursor = RoCursorImpl<'t>;
-
-    fn open_ro_cursor(&'t self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
-        self.0
-            .open_ro_cursor(db.0)
-            .map(RoCursorImpl)
-            .map_err(ErrorImpl::LmdbError)
-    }
-}
diff --git a/src/bin/dump.rs b/src/bin/dump.rs
deleted file mode 100644
index c0e12b9..0000000
--- a/src/bin/dump.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use std::{env::args, io, path::Path};
-
-use rkv::migrator::{LmdbArchMigrateError, LmdbArchMigrator};
-
-fn main() -> Result<(), LmdbArchMigrateError> {
-    let mut cli_args = args();
-    let mut db_name = None;
-    let mut env_path = None;
-
-    // The first arg is the name of the program, which we can ignore.
-    cli_args.next();
-
-    while let Some(arg) = cli_args.next() {
-        if &arg[0..1] == "-" {
-            match &arg[1..] {
-                "s" => {
-                    db_name = match cli_args.next() {
-                        None => return Err("-s must be followed by database name".into()),
-                        Some(str) => Some(str),
-                    };
-                }
-                str => return Err(format!("arg -{str} not recognized").into()),
-            }
-        } else {
-            if env_path.is_some() {
-                return Err("must provide only one path to the LMDB environment".into());
-            }
-            env_path = Some(arg);
-        }
-    }
-
-    let env_path = env_path.ok_or("must provide a path to the LMDB environment")?;
-    let mut migrator = LmdbArchMigrator::new(Path::new(&env_path))?;
-    migrator.dump(db_name.as_deref(), io::stdout()).unwrap();
-
-    Ok(())
-}
diff --git a/src/bin/rand.rs b/src/bin/rand.rs
deleted file mode 100644
index 56d578c..0000000
--- a/src/bin/rand.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-//! A command-line utility to create an LMDB environment containing random data.
-//! It requires one flag, `-s path/to/environment`, which specifies the location
-//! where the tool should create the environment. Optionally, you may specify
-//! the number of key/value pairs to create via the `-n <number>` flag
-//! (for which the default value is 50).
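As an aside, the map sizing this tool performs further down can be sanity-checked in isolation. A minimal sketch, assuming LMDB's default 511-byte maximum key size and the tool's 65535-byte maximum value length (the function name here is illustrative, not part of the tool):

```rust
// Worst-case bytes for one key/value pair, times the number of pairs the tool
// will write, doubled to leave headroom for page and metadata overhead.
fn worst_case_map_size(num_pairs: usize) -> usize {
    (511 + 65535) * num_pairs * 2
}

fn main() {
    // With the default of 50 pairs this allocates roughly 6.6 MB.
    assert_eq!(worst_case_map_size(50), 6_604_600);
}
```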
-
-use std::{env::args, fs, fs::File, io::Read, path::Path};
-
-use rkv::{
-    backend::{BackendEnvironmentBuilder, Lmdb},
-    Rkv, StoreOptions, Value,
-};
-
-fn main() {
-    let mut args = args();
-    let mut database = None;
-    let mut path = None;
-    let mut num_pairs = 50;
-
-    // The first arg is the name of the program, which we can ignore.
-    args.next();
-
-    while let Some(arg) = args.next() {
-        if &arg[0..1] == "-" {
-            match &arg[1..] {
-                "s" => {
-                    database = match args.next() {
-                        None => panic!("-s must be followed by database arg"),
-                        Some(str) => Some(str),
-                    };
-                }
-                "n" => {
-                    num_pairs = match args.next() {
-                        None => panic!("-s must be followed by number of pairs"),
-                        Some(str) => str.parse().expect("number"),
-                    };
-                }
-                str => panic!("arg -{} not recognized", str),
-            }
-        } else {
-            if path.is_some() {
-                panic!("must provide only one path to the LMDB environment");
-            }
-            path = Some(arg);
-        }
-    }
-
-    if path.is_none() {
-        panic!("must provide a path to the LMDB environment");
-    }
-
-    let path = path.unwrap();
-    fs::create_dir_all(&path).expect("dir created");
-
-    let mut builder = Rkv::environment_builder::<Lmdb>();
-    builder.set_max_dbs(2);
-    // Allocate enough map to accommodate the largest random collection.
-    // We currently do this by allocating twice the maximum possible size
-    // of the pairs (assuming maximum key and value sizes).
-    builder.set_map_size((511 + 65535) * num_pairs * 2);
-    let rkv = Rkv::from_builder(Path::new(&path), builder).expect("Rkv");
-    let store = rkv
-        .open_single(database.as_deref(), StoreOptions::create())
-        .expect("opened");
-    let mut writer = rkv.write().expect("writer");
-
-    // Generate random values for the number of keys and key/value lengths.
-    // On Linux, "Just use /dev/urandom!" <https://www.2uo.de/myths-about-urandom/>.
-    // On macOS it doesn't matter (/dev/random and /dev/urandom are identical).
-    let mut random = File::open("/dev/urandom").unwrap();
-    let mut nums = [0u8; 4];
-    random.read_exact(&mut nums).unwrap();
-
-    // Generate 0–255 pairs.
-    for _ in 0..num_pairs {
-        // Generate key and value lengths. The key must be 1–511 bytes long.
-        // The value length can be 0 and is essentially unbounded; we generate
-        // value lengths of 0–0xffff (65535).
-        // NB: the modulus method for generating a random number within a range
-        // introduces distribution skew, but we don't need it to be perfect.
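The skew that the `NB` comment above concedes is easy to quantify. A self-contained sketch, not part of the original file: mapping all 65536 `u16` values into `1..=511` with `% 511 + 1` over-represents the first 128 residues, because 65536 = 128 × 511 + 128.

```rust
fn main() {
    let mut counts = [0u32; 511];
    for x in 0..=u16::MAX {
        counts[(x % 511) as usize] += 1;
    }
    // Residues 0..=127 occur 129 times each; 128..=510 occur 128 times each.
    assert!(counts[..128].iter().all(|&c| c == 129));
    assert!(counts[128..].iter().all(|&c| c == 128));
}
```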
-        let key_len = ((u16::from(nums[0]) + (u16::from(nums[1]) << 8)) % 511 + 1) as usize;
-        let value_len = (u16::from(nums[2]) + (u16::from(nums[3]) << 8)) as usize;
-
-        let mut key: Vec<u8> = vec![0; key_len];
-        random.read_exact(&mut key[0..key_len]).unwrap();
-
-        let mut value: Vec<u8> = vec![0; value_len];
-        random.read_exact(&mut value[0..value_len]).unwrap();
-
-        store
-            .put(&mut writer, key, &Value::Blob(&value))
-            .expect("wrote");
-    }
-
-    writer.commit().expect("committed");
-}
diff --git a/src/env.rs b/src/env.rs
index 32e4fe5..c5f3942 100644
--- a/src/env.rs
+++ b/src/env.rs
@@ -174,10 +170,6 @@ where
             self.env
                 .create_db(name.into(), opts.flags)
                 .map_err(|e| match e.into() {
-                    #[cfg(feature = "lmdb")]
-                    StoreError::LmdbError(lmdb::Error::BadRslot) => {
-                        StoreError::open_during_transaction()
-                    }
                     StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => {
                         StoreError::open_during_transaction()
                     }
@@ -185,10 +181,6 @@
                 })
         } else {
             self.env.open_db(name.into()).map_err(|e| match e.into() {
-                #[cfg(feature = "lmdb")]
-                StoreError::LmdbError(lmdb::Error::BadRslot) => {
-                    StoreError::open_during_transaction()
-                }
                 StoreError::SafeModeError(SafeModeError::DbsIllegalOpen) => {
                     StoreError::open_during_transaction()
                 }
diff --git a/src/error.rs b/src/error.rs
index f50dda7..6f3fbe3 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -74,10 +70,6 @@ pub enum StoreError {
     #[error("data error: {0:?}")]
     DataError(#[from] DataError),
 
-    #[cfg(feature = "lmdb")]
-    #[error("lmdb backend error: {0}")]
-    LmdbError(lmdb::Error),
-
     #[error("safe mode backend error: {0}")]
     SafeModeError(SafeModeError),
 
diff --git a/src/lib.rs b/src/lib.rs
index 0c8951e..b78d906 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -209,8 +207,6 @@ mod manager;
 mod readwrite;
 
 pub mod backend;
-#[cfg(feature = "lmdb")]
-pub mod migrator;
 pub mod store;
 pub mod value;
 
@@ -218,8 +216,6 @@ pub use backend::{DatabaseFlags, EnvironmentFlags, WriteFlags};
 pub use env::Rkv;
 pub use error::{DataError, MigrateError, StoreError};
 pub use manager::Manager;
-#[cfg(feature = "lmdb")]
-pub use migrator::Migrator;
 pub use readwrite::{Readable, Reader, Writer};
 pub use store::{keys::EncodableKey, single::SingleStore, CloseOptions, Options as StoreOptions};
 pub use value::{OwnedValue, Value};
diff --git a/src/manager.rs b/src/manager.rs
index 0455644..3194506 100644
--- a/src/manager.rs
+++ b/src/manager.rs
@@ -18,8 +18,6 @@ use std::{
 
 use lazy_static::lazy_static;
 
-#[cfg(feature = "lmdb")]
-use crate::backend::LmdbEnvironment;
 use crate::{
     backend::{BackendEnvironment, BackendEnvironmentBuilder, SafeModeEnvironment},
     error::{CloseError, StoreError},
@@ -32,11 +30,6 @@ type Result<T> = result::Result<T, StoreError>;
 type CloseResult<T> = result::Result<T, CloseError>;
 type SharedRkv<E> = Arc<RwLock<Rkv<E>>>;
 
-#[cfg(feature = "lmdb")]
-lazy_static! {
-    static ref MANAGER_LMDB: RwLock<Manager<LmdbEnvironment>> = RwLock::new(Manager::new());
-}
-
 lazy_static! {
     static ref MANAGER_SAFE_MODE: RwLock<Manager<SafeModeEnvironment>> =
         RwLock::new(Manager::new());
@@ -177,13 +170,6 @@ where
     }
 }
 
-#[cfg(feature = "lmdb")]
-impl Manager<LmdbEnvironment> {
-    pub fn singleton() -> &'static RwLock<Manager<LmdbEnvironment>> {
-        &MANAGER_LMDB
-    }
-}
-
 impl Manager<SafeModeEnvironment> {
     pub fn singleton() -> &'static RwLock<Manager<SafeModeEnvironment>> {
         &MANAGER_SAFE_MODE
@@ -199,14 +185,12 @@ mod tests {
 
     use tempfile::Builder;
 
-    #[cfg(feature = "lmdb")]
-    use backend::Lmdb;
+    use backend::SafeMode;
 
     /// Test that one can mutate managed Rkv instances in surprising ways.
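For downstream users, the surviving entry point after this hunk is the SafeMode singleton alone; code previously reaching for the LMDB manager would move to something like the following sketch (hedged: the temporary directory stands in for a real storage path):

```rust
use rkv::backend::{SafeMode, SafeModeEnvironment};
use rkv::{Manager, Rkv};

fn main() {
    let root = tempfile::tempdir().expect("tempdir");
    // The remaining process-wide manager, backed by MANAGER_SAFE_MODE above.
    let mut manager = Manager::<SafeModeEnvironment>::singleton()
        .write()
        .expect("manager lock");
    let shared = manager
        .get_or_create(root.path(), Rkv::new::<SafeMode>)
        .expect("Rkv");
    let _env = shared.read().expect("env lock");
}
```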
-    #[cfg(feature = "lmdb")]
     #[test]
-    fn test_mutate_managed_rkv() {
-        let mut manager = Manager::<LmdbEnvironment>::new();
+    fn test_mutate_managed_rkv_safemode() {
+        let mut manager = Manager::<SafeModeEnvironment>::new();
 
         let root1 = Builder::new()
             .prefix("test_mutate_managed_rkv_1")
@@ -215,7 +199,7 @@ mod tests {
         fs::create_dir_all(root1.path()).expect("dir created");
         let path1 = root1.path();
         let arc = manager
-            .get_or_create(path1, Rkv::new::<Lmdb>)
+            .get_or_create(path1, Rkv::new::<SafeMode>)
             .expect("created");
 
         // Arc<RwLock<Rkv>> has interior mutability, so we can replace arc's Rkv instance with a new
@@ -228,7 +212,7 @@ mod tests {
         let path2 = root2.path();
         {
             let mut rkv = arc.write().expect("guard");
-            let rkv2 = Rkv::new::<Lmdb>(path2).expect("Rkv");
+            let rkv2 = Rkv::new::<SafeMode>(path2).expect("Rkv");
             *rkv = rkv2;
         }
 
@@ -240,7 +224,7 @@ mod tests {
         // Meanwhile, a new Arc for path2 has a different pointer, even though its Rkv's path is
         // the same as arc's current path.
         let path2_arc = manager
-            .get_or_create(path2, Rkv::new::<Lmdb>)
+            .get_or_create(path2, Rkv::new::<SafeMode>)
             .expect("success");
         assert!(!Arc::ptr_eq(&path2_arc, &arc));
     }
diff --git a/tests/env-all.rs b/tests/env-all.rs
deleted file mode 100644
index a8d93c7..0000000
--- a/tests/env-all.rs
+++ /dev/null
@@ -1,328 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use std::fs;
-
-use tempfile::Builder;
-
-use rkv::{
-    backend::{Lmdb, SafeMode},
-    Rkv, StoreOptions, Value,
-};
-
-#[test]
-fn test_open_safe_same_dir_as_lmdb() {
-    let root = Builder::new()
-        .prefix("test_open_safe_same_dir_as_lmdb")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    // Create database of type A and save to disk.
- { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let _ = k - .open_single("sk", StoreOptions::default()) - .expect_err("not opened"); - } - // Verify that database of type A wasn't changed. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&reader, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&reader, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } - // Create database of type B and save to disk (type A exists at the same path). - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo1", &Value::I64(5678)) - .expect("wrote"); - sk.put(&mut writer, "bar1", &Value::Bool(false)) - .expect("wrote"); - sk.put(&mut writer, "baz1", &Value::Str("héllo~ yöu")) - .expect("wrote"); - assert_eq!( - sk.get(&writer, "foo1").expect("read"), - Some(Value::I64(5678)) - ); - assert_eq!( - sk.get(&writer, "bar1").expect("read"), - Some(Value::Bool(false)) - ); - assert_eq!( - sk.get(&writer, "baz1").expect("read"), - Some(Value::Str("héllo~ yöu")) - ); - writer.commit().expect("committed"); - k.sync(true).expect("synced"); - } - // Verify that database of type B was written to disk. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo1").expect("read"), - Some(Value::I64(5678)) - ); - assert_eq!( - sk.get(&reader, "bar1").expect("read"), - Some(Value::Bool(false)) - ); - assert_eq!( - sk.get(&reader, "baz1").expect("read"), - Some(Value::Str("héllo~ yöu")) - ); - } - // Verify that database of type A still wasn't changed. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&reader, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&reader, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } -} - -#[test] -fn test_open_lmdb_same_dir_as_safe() { - let root = Builder::new() - .prefix("test_open_lmdb_same_dir_as_safe") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - // Create database of type A and save to disk. 
- { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::I64(1234)) - .expect("wrote"); - sk.put(&mut writer, "bar", &Value::Bool(true)) - .expect("wrote"); - sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")) - .expect("wrote"); - assert_eq!( - sk.get(&writer, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&writer, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&writer, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - writer.commit().expect("committed"); - k.sync(true).expect("synced"); - } - // Verify that database of type A was written to disk. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&reader, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&reader, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } - // Create database of type B and verify that it is empty. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let _ = k - .open_single("sk", StoreOptions::default()) - .expect_err("not opened"); - } - // Verify that database of type A wasn't changed. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&reader, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&reader, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } - // Create database of type B and save to disk (type A exists at the same path). - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo1", &Value::I64(5678)) - .expect("wrote"); - sk.put(&mut writer, "bar1", &Value::Bool(false)) - .expect("wrote"); - sk.put(&mut writer, "baz1", &Value::Str("héllo~ yöu")) - .expect("wrote"); - assert_eq!( - sk.get(&writer, "foo1").expect("read"), - Some(Value::I64(5678)) - ); - assert_eq!( - sk.get(&writer, "bar1").expect("read"), - Some(Value::Bool(false)) - ); - assert_eq!( - sk.get(&writer, "baz1").expect("read"), - Some(Value::Str("héllo~ yöu")) - ); - writer.commit().expect("committed"); - k.sync(true).expect("synced"); - } - // Verify that database of type B was written to disk. - { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo1").expect("read"), - Some(Value::I64(5678)) - ); - assert_eq!( - sk.get(&reader, "bar1").expect("read"), - Some(Value::Bool(false)) - ); - assert_eq!( - sk.get(&reader, "baz1").expect("read"), - Some(Value::Str("héllo~ yöu")) - ); - } - // Verify that database of type A still wasn't changed. 
- { - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&reader, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&reader, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } -} diff --git a/tests/env-lmdb.rs b/tests/env-lmdb.rs deleted file mode 100644 index e3aca39..0000000 --- a/tests/env-lmdb.rs +++ /dev/null @@ -1,1647 +0,0 @@ -// Copyright 2018-2019 Mozilla -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -// TODO: change this back to `clippy::cognitive_complexity` when Clippy stable -// deprecates `clippy::cyclomatic_complexity`. -#![allow(clippy::complexity)] - -use std::{ - fs, - path::Path, - str, - sync::{Arc, RwLock}, - thread, -}; - -use byteorder::{ByteOrder, LittleEndian}; -use tempfile::Builder; - -use rkv::{ - backend::{ - BackendEnvironmentBuilder, BackendInfo, BackendStat, Lmdb, LmdbDatabase, LmdbEnvironment, - LmdbRwTransaction, - }, - EnvironmentFlags, Rkv, SingleStore, StoreError, StoreOptions, Value, Writer, -}; - -fn check_rkv(k: &Rkv) { - let _ = k - .open_single(None, StoreOptions::create()) - .expect("created default"); - - let s = k.open_single("s", StoreOptions::create()).expect("opened"); - let reader = k.read().expect("reader"); - - let result = s.get(&reader, "foo"); - assert_eq!(None, result.expect("success but no value")); -} - -// The default size is 1MB. -const DEFAULT_SIZE: usize = 1024 * 1024; - -/// We can't open a directory that doesn't exist. 
-#[test] -fn test_open_fails() { - let root = Builder::new() - .prefix("test_open_fails") - .tempdir() - .expect("tempdir"); - assert!(root.path().exists()); - - let nope = root.path().join("nope/"); - assert!(!nope.exists()); - - let pb = nope.to_path_buf(); - match Rkv::new::(nope.as_path()).err() { - Some(StoreError::UnsuitableEnvironmentPath(p)) => { - assert_eq!(pb, p); - } - _ => panic!("expected error"), - }; -} - -#[test] -fn test_open() { - let root = Builder::new() - .prefix("test_open") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - check_rkv(&k); -} - -#[test] -fn test_open_from_builder() { - let root = Builder::new() - .prefix("test_open_from_builder") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let mut builder = Rkv::environment_builder::(); - builder.set_max_dbs(2); - - let k = Rkv::from_builder(root.path(), builder).expect("rkv"); - check_rkv(&k); -} - -#[test] -fn test_open_from_builder_with_no_subdir_1() { - let root = Builder::new() - .prefix("test_open_from_builder") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - { - let mut builder = Rkv::environment_builder::(); - builder.set_max_dbs(2); - - let k = Rkv::from_builder(root.path(), builder).expect("rkv"); - check_rkv(&k); - } - { - let mut builder = Rkv::environment_builder::(); - builder.set_flags(EnvironmentFlags::NO_SUB_DIR); - builder.set_max_dbs(2); - - let mut datamdb = root.path().to_path_buf(); - datamdb.push("data.mdb"); - - let k = Rkv::from_builder(&datamdb, builder).expect("rkv"); - check_rkv(&k); - } -} - -#[test] -#[should_panic(expected = "rkv: UnsuitableEnvironmentPath")] -fn test_open_from_builder_with_no_subdir_2() { - let root = Builder::new() - .prefix("test_open_from_builder") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - { - let mut builder = Rkv::environment_builder::(); - builder.set_max_dbs(2); - - let k = Rkv::from_builder(root.path(), builder).expect("rkv"); - check_rkv(&k); - } - { - let mut builder = Rkv::environment_builder::(); - builder.set_flags(EnvironmentFlags::NO_SUB_DIR); - builder.set_max_dbs(2); - - let mut datamdb = root.path().to_path_buf(); - datamdb.push("bogus.mdb"); - - let k = Rkv::from_builder(&datamdb, builder).expect("rkv"); - check_rkv(&k); - } -} - -#[test] -fn test_open_from_builder_with_dir_1() { - let root = Builder::new() - .prefix("test_open_from_builder") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - - let mut builder = Rkv::environment_builder::(); - builder.set_max_dbs(2); - builder.set_make_dir_if_needed(true); - - let k = Rkv::from_builder(root.path(), builder).expect("rkv"); - check_rkv(&k); -} - -#[test] -#[should_panic(expected = "rkv: UnsuitableEnvironmentPath(\"bogus\")")] -fn test_open_from_builder_with_dir_2() { - let root = Path::new("bogus"); - println!("Root path: {root:?}"); - assert!(!root.is_dir()); - - let mut builder = Rkv::environment_builder::(); - builder.set_max_dbs(2); - - let k = Rkv::from_builder(root, builder).expect("rkv"); - check_rkv(&k); -} - 
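The capacity tests that follow hinge on one invariant: the default (unnamed) database never counts against the `max_dbs` budget; only named databases do. A minimal sketch of the same invariant against the SafeMode backend (which this PR keeps), assuming `tempfile` as in the tests:

```rust
use rkv::backend::SafeMode;
use rkv::{Rkv, StoreOptions};

fn main() {
    let root = tempfile::tempdir().expect("tempdir");
    // A capacity of one admits a single *named* store...
    let k = Rkv::with_capacity::<SafeMode>(root.path(), 1).expect("rkv");
    let _named = k.open_single("s", StoreOptions::create()).expect("named");
    // ...while the default (unnamed) store does not count against the limit.
    let _default = k.open_single(None, StoreOptions::create()).expect("default");
}
```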
-#[test] -#[should_panic(expected = "opened: DbsFull")] -fn test_create_with_capacity_1() { - let root = Builder::new() - .prefix("test_create_with_capacity") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); - check_rkv(&k); - - // This errors with "opened: DbsFull" because we specified a capacity of one (database), - // and check_rkv already opened one (plus the default database, which doesn't count - // against the limit). - let _zzz = k - .open_single("zzz", StoreOptions::create()) - .expect("opened"); -} - -#[test] -fn test_create_with_capacity_2() { - let root = Builder::new() - .prefix("test_create_with_capacity") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); - check_rkv(&k); - - // This doesn't error with "opened: DbsFull" with because even though we specified a - // capacity of one (database), and check_rkv already opened one, the default database - // doesn't count against the limit. - let _zzz = k.open_single(None, StoreOptions::create()).expect("opened"); -} - -#[test] -#[should_panic(expected = "opened: DbsFull")] -fn test_open_with_capacity_1() { - let root = Builder::new() - .prefix("test_open_with_capacity") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); - check_rkv(&k); - - let _zzz = k - .open_single("zzz", StoreOptions::default()) - .expect("opened"); -} - -#[test] -fn test_open_with_capacity_2() { - let root = Builder::new() - .prefix("test_open_with_capacity") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); - check_rkv(&k); - - let _zzz = k - .open_single(None, StoreOptions::default()) - .expect("opened"); -} - -#[test] -fn test_list_dbs_1() { - let root = Builder::new() - .prefix("test_list_dbs") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::with_capacity::(root.path(), 1).expect("rkv"); - check_rkv(&k); - - let dbs = k.get_dbs().unwrap(); - assert_eq!(dbs, vec![Some("s".to_owned())]); -} - -#[test] -fn test_list_dbs_2() { - let root = Builder::new() - .prefix("test_list_dbs") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::with_capacity::(root.path(), 2).expect("rkv"); - check_rkv(&k); - - let _ = k - .open_single("zzz", StoreOptions::create()) - .expect("opened"); - - let dbs = k.get_dbs().unwrap(); - assert_eq!(dbs, vec![Some("s".to_owned()), Some("zzz".to_owned())]); -} - -#[test] -fn test_list_dbs_3() { - let root = Builder::new() - .prefix("test_list_dbs") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = 
Rkv::with_capacity::(root.path(), 0).expect("rkv"); - - let _ = k.open_single(None, StoreOptions::create()).expect("opened"); - - let dbs = k.get_dbs().unwrap(); - assert_eq!(dbs, vec![None]); -} - -fn get_larger_than_default_map_size_value() -> usize { - // The LMDB C library and lmdb Rust crate docs for setting the map size - // - // - // both say that the default map size is 10,485,760 bytes, i.e. 10MiB. - // - // But the DEFAULT_MAPSIZE define in the LMDB code - // https://github.com/LMDB/lmdb/blob/26c7df88e44e31623d0802a564f24781acdefde3/libraries/liblmdb/mdb.c#L729 - // sets the default map size to 1,048,576 bytes, i.e. 1MiB. - // - DEFAULT_SIZE + 1 /* 1,048,576 + 1 bytes, i.e. 1MiB + 1 byte */ -} - -#[test] -#[should_panic(expected = "wrote: MapFull")] -fn test_exceed_map_size() { - let root = Builder::new() - .prefix("test_exceed_map_size") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("test", StoreOptions::create()) - .expect("opened"); - - // Writing a large enough value should cause LMDB to fail on MapFull. - // We write a string that is larger than the default map size. - let val = "x".repeat(get_larger_than_default_map_size_value()); - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::Str(&val)) - .expect("wrote"); -} - -#[test] -#[should_panic(expected = "wrote: KeyValuePairBadSize")] -fn test_exceed_key_size_limit() { - let root = Builder::new() - .prefix("test_exceed_key_size_limit") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k - .open_single("test", StoreOptions::create()) - .expect("opened"); - - let key = "k".repeat(512); - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, key, &Value::Str("val")).expect("wrote"); -} - -#[test] -fn test_increase_map_size() { - let root = Builder::new() - .prefix("test_open_with_map_size") - .tempdir() - .expect("tempdir"); - println!("Root path: {:?}", root.path()); - fs::create_dir_all(root.path()).expect("dir created"); - assert!(root.path().is_dir()); - - let mut builder = Rkv::environment_builder::(); - // Set the map size to the size of the value we'll store in it + 100KiB, - // which ensures that there's enough space for the value and metadata. 
- builder.set_map_size( - get_larger_than_default_map_size_value() + 100 * 1024, /* 100KiB */ - ); - builder.set_max_dbs(2); - let k = Rkv::from_builder(root.path(), builder).unwrap(); - let sk = k - .open_single("test", StoreOptions::create()) - .expect("opened"); - let val = "x".repeat(get_larger_than_default_map_size_value()); - - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::Str(&val)) - .expect("wrote"); - writer.commit().expect("committed"); - - let reader = k.read().unwrap(); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::Str(&val)) - ); -} - -#[test] -fn test_round_trip_and_transactions() { - let root = Builder::new() - .prefix("test_round_trip_and_transactions") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - { - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::I64(1234)) - .expect("wrote"); - sk.put(&mut writer, "noo", &Value::F64(1234.0.into())) - .expect("wrote"); - sk.put(&mut writer, "bar", &Value::Bool(true)) - .expect("wrote"); - sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")) - .expect("wrote"); - assert_eq!( - sk.get(&writer, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&writer, "noo").expect("read"), - Some(Value::F64(1234.0.into())) - ); - assert_eq!( - sk.get(&writer, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&writer, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - - // Isolation. Reads won't return values. - let r = &k.read().unwrap(); - assert_eq!(sk.get(r, "foo").expect("read"), None); - assert_eq!(sk.get(r, "bar").expect("read"), None); - assert_eq!(sk.get(r, "baz").expect("read"), None); - } - - // Dropped: tx rollback. Reads will still return nothing. - - { - let r = &k.read().unwrap(); - assert_eq!(sk.get(r, "foo").expect("read"), None); - assert_eq!(sk.get(r, "bar").expect("read"), None); - assert_eq!(sk.get(r, "baz").expect("read"), None); - } - - { - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::I64(1234)) - .expect("wrote"); - sk.put(&mut writer, "bar", &Value::Bool(true)) - .expect("wrote"); - sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")) - .expect("wrote"); - assert_eq!( - sk.get(&writer, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - sk.get(&writer, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - sk.get(&writer, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - - writer.commit().expect("committed"); - } - - // Committed. Reads will succeed. - - { - let r = k.read().unwrap(); - assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); - assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); - assert_eq!( - sk.get(&r, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } - - { - let mut writer = k.write().expect("writer"); - sk.delete(&mut writer, "foo").expect("deleted"); - sk.delete(&mut writer, "bar").expect("deleted"); - sk.delete(&mut writer, "baz").expect("deleted"); - assert_eq!(sk.get(&writer, "foo").expect("read"), None); - assert_eq!(sk.get(&writer, "bar").expect("read"), None); - assert_eq!(sk.get(&writer, "baz").expect("read"), None); - - // Isolation. Reads still return values. 
- let r = k.read().unwrap(); - assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); - assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); - assert_eq!( - sk.get(&r, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } - - // Dropped: tx rollback. Reads will still return values. - - { - let r = k.read().unwrap(); - assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234))); - assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true))); - assert_eq!( - sk.get(&r, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); - } - - { - let mut writer = k.write().expect("writer"); - sk.delete(&mut writer, "foo").expect("deleted"); - sk.delete(&mut writer, "bar").expect("deleted"); - sk.delete(&mut writer, "baz").expect("deleted"); - assert_eq!(sk.get(&writer, "foo").expect("read"), None); - assert_eq!(sk.get(&writer, "bar").expect("read"), None); - assert_eq!(sk.get(&writer, "baz").expect("read"), None); - - writer.commit().expect("committed"); - } - - // Committed. Reads will succeed but return None to indicate a missing value. - - { - let r = k.read().unwrap(); - assert_eq!(sk.get(&r, "foo").expect("read"), None); - assert_eq!(sk.get(&r, "bar").expect("read"), None); - assert_eq!(sk.get(&r, "baz").expect("read"), None); - } -} - -#[test] -fn test_single_store_clear() { - let root = Builder::new() - .prefix("test_single_store_clear") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - { - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::I64(1234)) - .expect("wrote"); - sk.put(&mut writer, "bar", &Value::Bool(true)) - .expect("wrote"); - sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")) - .expect("wrote"); - writer.commit().expect("committed"); - } - - { - let mut writer = k.write().expect("writer"); - sk.clear(&mut writer).expect("cleared"); - writer.commit().expect("committed"); - } - - { - let r = k.read().unwrap(); - let iter = sk.iter_start(&r).expect("iter"); - assert_eq!(iter.count(), 0); - } -} - -#[test] -#[should_panic(expected = "KeyValuePairNotFound")] -fn test_single_store_delete_nonexistent() { - let root = Builder::new() - .prefix("test_single_store_delete_nonexistent") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - sk.delete(&mut writer, "bogus").unwrap(); -} - -#[test] -#[cfg(feature = "db-dup-sort")] -fn test_multi_put_get_del() { - let root = Builder::new() - .prefix("test_multi_put_get_del") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap(); - - let mut writer = k.write().unwrap(); - multistore - .put(&mut writer, "str1", &Value::Str("str1 foo")) - .unwrap(); - multistore - .put(&mut writer, "str1", &Value::Str("str1 bar")) - .unwrap(); - multistore - .put(&mut writer, "str2", &Value::Str("str2 foo")) - .unwrap(); - multistore - .put(&mut writer, "str2", &Value::Str("str2 bar")) - .unwrap(); - multistore - .put(&mut writer, "str3", &Value::Str("str3 foo")) - .unwrap(); - multistore - 
.put(&mut writer, "str3", &Value::Str("str3 bar")) - .unwrap(); - writer.commit().unwrap(); - - let writer = k.write().unwrap(); - { - let mut iter = multistore.get(&writer, "str1").unwrap(); - let (id, val) = iter.next().unwrap().unwrap(); - assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 bar"))); - let (id, val) = iter.next().unwrap().unwrap(); - assert_eq!((id, val), (&b"str1"[..], Value::Str("str1 foo"))); - } - writer.commit().unwrap(); - - let mut writer = k.write().unwrap(); - multistore - .delete(&mut writer, "str1", &Value::Str("str1 foo")) - .unwrap(); - assert_eq!( - multistore.get_first(&writer, "str1").unwrap(), - Some(Value::Str("str1 bar")) - ); - multistore - .delete(&mut writer, "str2", &Value::Str("str2 bar")) - .unwrap(); - assert_eq!( - multistore.get_first(&writer, "str2").unwrap(), - Some(Value::Str("str2 foo")) - ); - multistore.delete_all(&mut writer, "str3").unwrap(); - assert_eq!(multistore.get_first(&writer, "str3").unwrap(), None); - writer.commit().unwrap(); -} - -#[test] -#[cfg(feature = "db-dup-sort")] -fn test_multiple_store_clear() { - let root = Builder::new() - .prefix("test_multiple_store_clear") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let multistore = k - .open_multi("multistore", StoreOptions::create()) - .expect("opened"); - - { - let mut writer = k.write().expect("writer"); - multistore - .put(&mut writer, "str1", &Value::Str("str1 foo")) - .unwrap(); - multistore - .put(&mut writer, "str1", &Value::Str("str1 bar")) - .unwrap(); - multistore - .put(&mut writer, "str2", &Value::Str("str2 foo")) - .unwrap(); - multistore - .put(&mut writer, "str2", &Value::Str("str2 bar")) - .unwrap(); - multistore - .put(&mut writer, "str3", &Value::Str("str3 foo")) - .unwrap(); - multistore - .put(&mut writer, "str3", &Value::Str("str3 bar")) - .unwrap(); - writer.commit().expect("committed"); - } - - { - let mut writer = k.write().expect("writer"); - multistore.clear(&mut writer).expect("cleared"); - writer.commit().expect("committed"); - } - - { - let r = k.read().unwrap(); - assert_eq!(multistore.get_first(&r, "str1").expect("read"), None); - assert_eq!(multistore.get_first(&r, "str2").expect("read"), None); - assert_eq!(multistore.get_first(&r, "str3").expect("read"), None); - } -} - -#[test] -fn test_open_store_for_read() { - let root = Builder::new() - .prefix("test_open_store_for_read") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - - // First create the store, and start a write transaction on it. - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::Str("bar")) - .expect("write"); - - // Open the same store for read, note that the write transaction is still in progress, - // it should not block the reader though. - let sk_readonly = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - writer.commit().expect("commit"); - - // Now the write transaction is committed, any followed reads should see its change. 
- let reader = k.read().expect("reader"); - assert_eq!( - sk_readonly.get(&reader, "foo").expect("read"), - Some(Value::Str("bar")) - ); -} - -#[test] -#[should_panic(expected = "open a missing store")] -fn test_open_a_missing_store() { - let root = Builder::new() - .prefix("test_open_a_missing_store") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let _sk = k - .open_single("sk", StoreOptions::default()) - .expect("open a missing store"); -} - -#[test] -#[should_panic(expected = "new failed: FileInvalid")] -fn test_open_a_broken_store() { - let root = Builder::new() - .prefix("test_open_a_missing_store") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let dbfile = root.path().join("data.mdb"); - fs::write(dbfile, "bogus").expect("dbfile created"); - - let _ = Rkv::new::(root.path()).expect("new failed"); -} - -#[test] -fn test_open_fail_with_badrslot() { - let root = Builder::new() - .prefix("test_open_fail_with_badrslot") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - - // First create the store - let _sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - // Open a reader on this store - let _reader = k.read().expect("reader"); - - // Open the same store for read while the reader is in progress will panic - let store = k.open_single("sk", StoreOptions::default()); - match store { - Err(StoreError::OpenAttemptedDuringTransaction(_thread_id)) => (), - _ => panic!("should panic"), - } -} - -#[test] -fn test_read_before_write_num() { - let root = Builder::new() - .prefix("test_read_before_write_num") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - // Test reading a number, modifying it, and then writing it back. - // We have to be done with the Value::I64 before calling Writer::put, - // as the Value::I64 borrows an immutable reference to the Writer. - // So we extract and copy its primitive value. - - fn get_existing_foo( - store: SingleStore, - writer: &Writer, - ) -> Option { - match store.get(writer, "foo").expect("read") { - Some(Value::I64(val)) => Some(val), - _ => None, - } - } - - let mut writer = k.write().expect("writer"); - let mut existing = get_existing_foo(sk, &writer).unwrap_or(99); - existing += 1; - sk.put(&mut writer, "foo", &Value::I64(existing)) - .expect("success"); - - let updated = get_existing_foo(sk, &writer).unwrap_or(99); - assert_eq!(updated, 100); - writer.commit().expect("commit"); -} - -#[test] -fn test_read_before_write_str() { - let root = Builder::new() - .prefix("test_read_before_write_str") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - // Test reading a string, modifying it, and then writing it back. - // We have to be done with the Value::Str before calling Writer::put, - // as the Value::Str (and its underlying &str) borrows an immutable - // reference to the Writer. So we copy it to a String. 
- - fn get_existing_foo( - store: SingleStore, - writer: &Writer, - ) -> Option { - match store.get(writer, "foo").expect("read") { - Some(Value::Str(val)) => Some(val.to_string()), - _ => None, - } - } - - let mut writer = k.write().expect("writer"); - let mut existing = get_existing_foo(sk, &writer).unwrap_or_default(); - existing.push('…'); - sk.put(&mut writer, "foo", &Value::Str(&existing)) - .expect("write"); - - let updated = get_existing_foo(sk, &writer).unwrap_or_default(); - assert_eq!(updated, "…"); - writer.commit().expect("commit"); -} - -#[test] -fn test_concurrent_read_transactions_prohibited() { - let root = Builder::new() - .prefix("test_concurrent_reads_prohibited") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let _first = k.read().expect("reader"); - let second = k.read(); - - match second { - Err(StoreError::ReadTransactionAlreadyExists(t)) => { - println!("Thread was {t:?}"); - } - Err(e) => { - println!("Got error {e:?}"); - } - _ => { - panic!("Expected error."); - } - } -} - -#[test] -fn test_isolation() { - let root = Builder::new() - .prefix("test_isolation") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let s = k.open_single("s", StoreOptions::create()).expect("opened"); - - // Add one field. - { - let mut writer = k.write().expect("writer"); - s.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote"); - writer.commit().expect("committed"); - } - - { - let reader = k.read().unwrap(); - assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); - } - - // Establish a long-lived reader that outlasts a writer. - let reader = k.read().expect("reader"); - assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); - - // Start a write transaction. - let mut writer = k.write().expect("writer"); - s.put(&mut writer, "foo", &Value::I64(999)).expect("wrote"); - - // The reader and writer are isolated. - assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); - assert_eq!(s.get(&writer, "foo").expect("read"), Some(Value::I64(999))); - - // If we commit the writer, we still have isolation. - writer.commit().expect("committed"); - assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234))); - - // A new reader sees the committed value. Note that LMDB doesn't allow two - // read transactions to exist in the same thread, so we abort the previous one. 
- reader.abort(); - let reader = k.read().expect("reader"); - assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(999))); -} - -#[test] -fn test_blob() { - let root = Builder::new() - .prefix("test_round_trip_blob") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - assert_eq!(sk.get(&writer, "foo").expect("read"), None); - sk.put(&mut writer, "foo", &Value::Blob(&[1, 2, 3, 4])) - .expect("wrote"); - assert_eq!( - sk.get(&writer, "foo").expect("read"), - Some(Value::Blob(&[1, 2, 3, 4])) - ); - - fn u16_to_u8(src: &[u16]) -> Vec { - let mut dst = vec![0; 2 * src.len()]; - LittleEndian::write_u16_into(src, &mut dst); - dst - } - - fn u8_to_u16(src: &[u8]) -> Vec { - let mut dst = vec![0; src.len() / 2]; - LittleEndian::read_u16_into(src, &mut dst); - dst - } - - // When storing UTF-16 strings as blobs, we'll need to convert - // their [u16] backing storage to [u8]. Test that converting, writing, - // reading, and converting back works as expected. - let u16_array = [1000, 10000, 54321, 65535]; - assert_eq!(sk.get(&writer, "bar").expect("read"), None); - sk.put(&mut writer, "bar", &Value::Blob(&u16_to_u8(&u16_array))) - .expect("wrote"); - let u8_array = match sk.get(&writer, "bar").expect("read") { - Some(Value::Blob(val)) => val, - _ => &[], - }; - assert_eq!(u8_to_u16(u8_array), u16_array); -} - -#[test] -fn test_sync() { - let root = Builder::new() - .prefix("test_sync") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let mut builder = Rkv::environment_builder::(); - builder.set_max_dbs(1); - builder.set_flags(EnvironmentFlags::NO_SYNC); - { - let k = Rkv::from_builder(root.path(), builder).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - { - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::I64(1234)) - .expect("wrote"); - writer.commit().expect("committed"); - k.sync(true).expect("synced"); - } - } - let k = Rkv::from_builder(root.path(), builder).expect("new succeeded"); - let sk = k - .open_single("sk", StoreOptions::default()) - .expect("opened"); - let reader = k.read().expect("reader"); - assert_eq!( - sk.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); -} - -#[test] -#[cfg(feature = "db-int-key")] -fn test_stat() { - let root = Builder::new() - .prefix("test_stat") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - for i in 0..5 { - let sk = k - .open_integer(&format!("sk{i}")[..], StoreOptions::create()) - .expect("opened"); - { - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, i, &Value::I64(i64::from(i))) - .expect("wrote"); - writer.commit().expect("committed"); - } - } - assert_eq!(k.stat().expect("stat").depth(), 1); - assert_eq!(k.stat().expect("stat").entries(), 5); - assert_eq!(k.stat().expect("stat").branch_pages(), 0); - assert_eq!(k.stat().expect("stat").leaf_pages(), 1); -} - -#[test] -fn test_info() { - let root = Builder::new() - .prefix("test_info") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", 
StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::Str("bar")) - .expect("wrote"); - writer.commit().expect("commited"); - - let info = k.info().expect("info"); - - // The default size is 1MB. - assert_eq!(info.map_size(), DEFAULT_SIZE); - // Should greater than 0 after the write txn. - assert!(info.last_pgno() > 0); - // A txn to open_single + a txn to write. - assert_eq!(info.last_txnid(), 2); - // The default max readers is 126. - assert_eq!(info.max_readers(), 126); - assert_eq!(info.num_readers(), 0); - - // A new reader should increment the reader counter. - let _reader = k.read().expect("reader"); - let info = k.info().expect("info"); - - assert_eq!(info.num_readers(), 1); -} - -#[test] -fn test_load_ratio() { - let root = Builder::new() - .prefix("test_load_ratio") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::Str("bar")) - .expect("wrote"); - writer.commit().expect("commited"); - let ratio = k.load_ratio().expect("ratio").unwrap(); - assert!(ratio > 0.0_f32 && ratio < 1.0_f32); - - // Put data to database should increase the load ratio. - let mut writer = k.write().expect("writer"); - sk.put( - &mut writer, - "bar", - &Value::Str(&"more-than-4KB".repeat(1000)), - ) - .expect("wrote"); - writer.commit().expect("commited"); - let new_ratio = k.load_ratio().expect("ratio").unwrap(); - assert!(new_ratio > ratio); - - // Clear the database so that all the used pages should go to freelist, hence the ratio - // should decrease. - let mut writer = k.write().expect("writer"); - sk.clear(&mut writer).expect("clear"); - writer.commit().expect("commited"); - let after_clear_ratio = k.load_ratio().expect("ratio").unwrap(); - assert!(after_clear_ratio < new_ratio); -} - -#[test] -fn test_set_map_size() { - let root = Builder::new() - .prefix("test_size_map_size") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - assert_eq!(k.info().expect("info").map_size(), DEFAULT_SIZE); - - k.set_map_size(2 * DEFAULT_SIZE).expect("resized"); - - // Should be able to write. - let mut writer = k.write().expect("writer"); - sk.put(&mut writer, "foo", &Value::Str("bar")) - .expect("wrote"); - writer.commit().expect("commited"); - - assert_eq!(k.info().expect("info").map_size(), 2 * DEFAULT_SIZE); -} - -#[test] -fn test_iter() { - let root = Builder::new() - .prefix("test_iter") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let k = Rkv::new::(root.path()).expect("new succeeded"); - let sk = k.open_single("sk", StoreOptions::create()).expect("opened"); - - // An iterator over an empty store returns no values. 
-    {
-        let reader = k.read().unwrap();
-        let mut iter = sk.iter_start(&reader).unwrap();
-        assert!(iter.next().is_none());
-    }
-
-    let mut writer = k.write().expect("writer");
-    sk.put(&mut writer, "foo", &Value::I64(1234))
-        .expect("wrote");
-    sk.put(&mut writer, "noo", &Value::F64(1234.0.into()))
-        .expect("wrote");
-    sk.put(&mut writer, "bar", &Value::Bool(true))
-        .expect("wrote");
-    sk.put(&mut writer, "baz", &Value::Str("héllo, yöu"))
-        .expect("wrote");
-    sk.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!"))
-        .expect("wrote");
-    sk.put(&mut writer, "你好,遊客", &Value::Str("米克規則"))
-        .expect("wrote");
-    writer.commit().expect("committed");
-
-    let reader = k.read().unwrap();
-
-    // iter_start() returns (key, value) tuples ordered by key.
-    let mut iter = sk.iter_start(&reader).unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "bar");
-    assert_eq!(val, Value::Bool(true));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "baz");
-    assert_eq!(val, Value::Str("héllo, yöu"));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "foo");
-    assert_eq!(val, Value::I64(1234));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
-    assert_eq!(val, Value::Str("Emil.RuleZ!"));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // Iterators don't loop. Once one returns None, additional calls
-    // to its next() method will always return None.
-    assert!(iter.next().is_none());
-
-    // iter_from() begins iteration at the first key equal to
-    // or greater than the given key.
-    let mut iter = sk.iter_from(&reader, "moo").unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // iter_from() works as expected when the given key is a prefix
-    // of a key in the store.
-    let mut iter = sk.iter_from(&reader, "no").unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-}
-
-#[test]
-fn test_iter_from_key_greater_than_existing() {
-    let root = Builder::new()
-        .prefix("test_iter_from_key_greater_than_existing")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-    let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    let sk = k.open_single("sk", StoreOptions::create()).expect("opened");
-
-    let mut writer = k.write().expect("writer");
-    sk.put(&mut writer, "foo", &Value::I64(1234))
-        .expect("wrote");
-    sk.put(&mut writer, "noo", &Value::F64(1234.0.into()))
-        .expect("wrote");
-    sk.put(&mut writer, "bar", &Value::Bool(true))
-        .expect("wrote");
-    sk.put(&mut writer, "baz", &Value::Str("héllo, yöu"))
-        .expect("wrote");
-    writer.commit().expect("committed");
-
-    let reader = k.read().unwrap();
-    let mut iter = sk.iter_from(&reader, "nuu").unwrap();
-    assert!(iter.next().is_none());
-}
-
-#[test]
-fn test_multiple_store_read_write() {
-    let root = Builder::new()
-        .prefix("test_multiple_store_read_write")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    let s1 = k
-        .open_single("store_1", StoreOptions::create())
-        .expect("opened");
-    let s2 = k
-        .open_single("store_2", StoreOptions::create())
-        .expect("opened");
-    let s3 = k
-        .open_single("store_3", StoreOptions::create())
-        .expect("opened");
-
-    let mut writer = k.write().expect("writer");
-    s1.put(&mut writer, "foo", &Value::Str("bar"))
-        .expect("wrote");
-    s2.put(&mut writer, "foo", &Value::I64(123)).expect("wrote");
-    s3.put(&mut writer, "foo", &Value::Bool(true))
-        .expect("wrote");
-
-    assert_eq!(
-        s1.get(&writer, "foo").expect("read"),
-        Some(Value::Str("bar"))
-    );
-    assert_eq!(s2.get(&writer, "foo").expect("read"), Some(Value::I64(123)));
-    assert_eq!(
-        s3.get(&writer, "foo").expect("read"),
-        Some(Value::Bool(true))
-    );
-
-    writer.commit().expect("committed");
-
-    let reader = k.read().expect("unbound_reader");
-    assert_eq!(
-        s1.get(&reader, "foo").expect("read"),
-        Some(Value::Str("bar"))
-    );
-    assert_eq!(s2.get(&reader, "foo").expect("read"), Some(Value::I64(123)));
-    assert_eq!(
-        s3.get(&reader, "foo").expect("read"),
-        Some(Value::Bool(true))
-    );
-    reader.abort();
-
-    // Test deleting the same key across multiple stores.
-    let mut writer = k.write().expect("writer");
-    s1.delete(&mut writer, "foo").expect("deleted");
-    s2.delete(&mut writer, "foo").expect("deleted");
-    s3.delete(&mut writer, "foo").expect("deleted");
-    writer.commit().expect("committed");
-
-    let reader = k.read().expect("reader");
-    assert_eq!(s1.get(&reader, "foo").expect("value"), None);
-    assert_eq!(s2.get(&reader, "foo").expect("value"), None);
-    assert_eq!(s3.get(&reader, "foo").expect("value"), None);
-}
-
-#[test]
-fn test_multiple_store_iter() {
-    let root = Builder::new()
-        .prefix("test_multiple_store_iter")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let k = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    let s1 = k
-        .open_single("store_1", StoreOptions::create())
-        .expect("opened");
-    let s2 = k
-        .open_single("store_2", StoreOptions::create())
-        .expect("opened");
-
-    let mut writer = k.write().expect("writer");
-    // Write to "s1"
-    s1.put(&mut writer, "foo", &Value::I64(1234))
-        .expect("wrote");
-    s1.put(&mut writer, "noo", &Value::F64(1234.0.into()))
-        .expect("wrote");
-    s1.put(&mut writer, "bar", &Value::Bool(true))
-        .expect("wrote");
-    s1.put(&mut writer, "baz", &Value::Str("héllo, yöu"))
-        .expect("wrote");
-    s1.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!"))
-        .expect("wrote");
-    s1.put(&mut writer, "你好,遊客", &Value::Str("米克規則"))
-        .expect("wrote");
-    // Write the same pairs to "s2"
-    s2.put(&mut writer, "foo", &Value::I64(1234))
-        .expect("wrote");
-    s2.put(&mut writer, "noo", &Value::F64(1234.0.into()))
-        .expect("wrote");
-    s2.put(&mut writer, "bar", &Value::Bool(true))
-        .expect("wrote");
-    s2.put(&mut writer, "baz", &Value::Str("héllo, yöu"))
-        .expect("wrote");
-    s2.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!"))
-        .expect("wrote");
-    s2.put(&mut writer, "你好,遊客", &Value::Str("米克規則"))
-        .expect("wrote");
-    writer.commit().expect("committed");
-
-    let reader = k.read().unwrap();
-
-    // Iterate through the whole store in "s1"
-    let mut iter = s1.iter_start(&reader).unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "bar");
-    assert_eq!(val, Value::Bool(true));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "baz");
-    assert_eq!(val, Value::Str("héllo, yöu"));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "foo");
-    assert_eq!(val, Value::I64(1234));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
-    assert_eq!(val, Value::Str("Emil.RuleZ!"));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // Iterate through the whole store in "s2"
-    let mut iter = s2.iter_start(&reader).unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "bar");
-    assert_eq!(val, Value::Bool(true));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "baz");
-    assert_eq!(val, Value::Str("héllo, yöu"));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "foo");
-    assert_eq!(val, Value::I64(1234));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
-    assert_eq!(val, Value::Str("Emil.RuleZ!"));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // Iterate from a given key in "s1"
-    let mut iter = s1.iter_from(&reader, "moo").unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // Iterate from a given key in "s2"
-    let mut iter = s2.iter_from(&reader, "moo").unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // Iterate from a given prefix in "s1"
-    let mut iter = s1.iter_from(&reader, "no").unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-
-    // Iterate from a given prefix in "s2"
-    let mut iter = s2.iter_from(&reader, "no").unwrap();
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "noo");
-    assert_eq!(val, Value::F64(1234.0.into()));
-    let (key, val) = iter.next().unwrap().unwrap();
-    assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
-    assert_eq!(val, Value::Str("米克規則"));
-    assert!(iter.next().is_none());
-}
-
-#[test]
-fn test_store_multiple_thread() {
-    let root = Builder::new()
-        .prefix("test_multiple_thread")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let rkv_arc = Arc::new(RwLock::new(
-        Rkv::new::<Lmdb>(root.path()).expect("new succeeded"),
-    ));
-    let store = rkv_arc
-        .read()
-        .unwrap()
-        .open_single("test", StoreOptions::create())
-        .expect("opened");
-
-    let num_threads = 10;
-    let mut write_handles = Vec::with_capacity(num_threads as usize);
-    let mut read_handles = Vec::with_capacity(num_threads as usize);
-
-    // Note that this isn't intended to demonstrate a good use of threads.
-    // For this shape of data, it would be more performant to write/read
-    // all values using one transaction in a single thread. The point here
-    // is just to confirm that a store can be shared by multiple threads.
-
-    // For each KV pair, spawn a thread that writes it to the store.
-    for i in 0..num_threads {
-        let rkv_arc = rkv_arc.clone();
-        write_handles.push(thread::spawn(move || {
-            let rkv = rkv_arc.write().expect("rkv");
-            let mut writer = rkv.write().expect("writer");
-            store
-                .put(&mut writer, i.to_string(), &Value::U64(i))
-                .expect("written");
-            writer.commit().unwrap();
-        }));
-    }
-    for handle in write_handles {
-        handle.join().expect("joined");
-    }
-
-    // For each KV pair, spawn a thread that reads it from the store
-    // and returns its value.
-    for i in 0..num_threads {
-        let rkv_arc = rkv_arc.clone();
-        read_handles.push(thread::spawn(move || {
-            let rkv = rkv_arc.read().expect("rkv");
-            let reader = rkv.read().expect("reader");
-            let value = match store.get(&reader, i.to_string()) {
-                Ok(Some(Value::U64(value))) => value,
-                Ok(Some(_)) => panic!("value type unexpected"),
-                Ok(None) => panic!("value not found"),
-                Err(err) => panic!("{}", err),
-            };
-            assert_eq!(value, i);
-            value
-        }));
-    }
-
-    // Sum the values returned from the reader threads and confirm that the
-    // total equals the sum of the values the writer threads stored.
-    let thread_sum: u64 = read_handles
-        .into_iter()
-        .map(|handle| handle.join().expect("value"))
-        .sum();
-    assert_eq!(thread_sum, (0..num_threads).sum());
-}
-
-#[test]
-fn test_use_value_as_key() {
-    let root = Builder::new()
-        .prefix("test_use_value_as_key")
-        .tempdir()
-        .expect("tempdir");
-    let rkv = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    let store = rkv
-        .open_single("store", StoreOptions::create())
-        .expect("opened");
-
-    {
-        let mut writer = rkv.write().expect("writer");
-        store
-            .put(&mut writer, "foo", &Value::Str("bar"))
-            .expect("wrote");
-        store
-            .put(&mut writer, "bar", &Value::Str("baz"))
-            .expect("wrote");
-        writer.commit().expect("committed");
-    }
-
-    // It's possible to retrieve a value with a Reader and then use it
-    // as a key with a Writer.
-    {
-        let reader = &rkv.read().unwrap();
-        if let Some(Value::Str(key)) = store.get(reader, "foo").expect("read") {
-            let mut writer = rkv.write().expect("writer");
-            store.delete(&mut writer, key).expect("deleted");
-            writer.commit().expect("committed");
-        }
-    }
-
-    {
-        let mut writer = rkv.write().expect("writer");
-        store
-            .put(&mut writer, "bar", &Value::Str("baz"))
-            .expect("wrote");
-        writer.commit().expect("committed");
-    }
-
-    // You can also retrieve a Value with a Writer and then use it as a key
-    // with the same Writer, provided you copy the value to an owned type
-    // first, so the Writer isn't still borrowed by the retrieved value
-    // when you borrow the Writer again to modify that value.
-    {
-        let mut writer = rkv.write().expect("writer");
-        if let Some(Value::Str(value)) = store.get(&writer, "foo").expect("read") {
-            let key = value.to_owned();
-            store.delete(&mut writer, key).expect("deleted");
-            writer.commit().expect("committed");
-        }
-    }
-
-    {
-        let name1 = rkv
-            .open_single("name1", StoreOptions::create())
-            .expect("opened");
-        let name2 = rkv
-            .open_single("name2", StoreOptions::create())
-            .expect("opened");
-        let mut writer = rkv.write().expect("writer");
-        name1
-            .put(&mut writer, "key1", &Value::Str("bar"))
-            .expect("wrote");
-        name1
-            .put(&mut writer, "bar", &Value::Str("baz"))
-            .expect("wrote");
-        name2
-            .put(&mut writer, "key2", &Value::Str("bar"))
-            .expect("wrote");
-        name2
-            .put(&mut writer, "bar", &Value::Str("baz"))
-            .expect("wrote");
-        writer.commit().expect("committed");
-    }
-
-    // You can also iterate (store, key) pairs to retrieve foreign keys,
-    // then iterate those foreign keys to modify/delete them.
-    //
-    // You need to open the stores in advance, since opening a store
-    // uses a write transaction internally, so opening one while a writer
-    // is extant would hang.
-    //
-    // And you need to copy the values to an owned type so the Writer isn't
-    // still borrowed by a retrieved value when you borrow the Writer
-    // again to modify another value.
-    let fields = vec![
-        (
-            rkv.open_single("name1", StoreOptions::create())
-                .expect("opened"),
-            "key1",
-        ),
-        (
-            rkv.open_single("name2", StoreOptions::create())
-                .expect("opened"),
-            "key2",
-        ),
-    ];
-    {
-        let mut foreignkeys = Vec::new();
-        let mut writer = rkv.write().expect("writer");
-        for (store, key) in fields.iter() {
-            if let Some(Value::Str(value)) = store.get(&writer, key).expect("read") {
-                foreignkeys.push((store, value.to_owned()));
-            }
-        }
-        for (store, key) in foreignkeys.iter() {
-            store.delete(&mut writer, key).expect("deleted");
-        }
-        writer.commit().expect("committed");
-    }
-}
diff --git a/tests/env-migration.rs b/tests/env-migration.rs
deleted file mode 100644
index 7026c25..0000000
--- a/tests/env-migration.rs
+++ /dev/null
@@ -1,607 +0,0 @@
-// Copyright 2018-2019 Mozilla
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
-// this file except in compliance with the License. You may obtain a copy of the
-// License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software distributed
-// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
-// CONDITIONS OF ANY KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations under the License.
-
-use std::{fs, path::Path};
-
-use tempfile::Builder;
-
-use rkv::{
-    backend::{Lmdb, LmdbEnvironment, SafeMode, SafeModeEnvironment},
-    Manager, Migrator, Rkv, StoreOptions, Value,
-};
-
-macro_rules! populate_store {
-    ($env:expr) => {
-        let store = $env
-            .open_single("store", StoreOptions::create())
-            .expect("opened");
-        let mut writer = $env.write().expect("writer");
-        store
-            .put(&mut writer, "foo", &Value::I64(1234))
-            .expect("wrote");
-        store
-            .put(&mut writer, "bar", &Value::Bool(true))
-            .expect("wrote");
-        store
-            .put(&mut writer, "baz", &Value::Str("héllo, yöu"))
-            .expect("wrote");
-        writer.commit().expect("committed");
-    };
-}
-
-#[test]
-fn test_open_migrator_lmdb_to_safe() {
-    let root = Builder::new()
-        .prefix("test_open_migrator_lmdb_to_safe")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    // Populate the source environment and persist it to disk.
-    {
-        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        populate_store!(&src_env);
-        src_env.sync(true).expect("synced");
-    }
-    // Check if the files were written to disk.
-    {
-        let mut datamdb = root.path().to_path_buf();
-        let mut lockmdb = root.path().to_path_buf();
-        datamdb.push("data.mdb");
-        lockmdb.push("lock.mdb");
-        assert!(datamdb.exists());
-        assert!(lockmdb.exists());
-    }
-    // Verify that the database was written to disk.
-    {
-        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        let store = src_env
-            .open_single("store", StoreOptions::default())
-            .expect("opened");
-        let reader = src_env.read().expect("reader");
-        assert_eq!(
-            store.get(&reader, "foo").expect("read"),
-            Some(Value::I64(1234))
-        );
-        assert_eq!(
-            store.get(&reader, "bar").expect("read"),
-            Some(Value::Bool(true))
-        );
-        assert_eq!(
-            store.get(&reader, "baz").expect("read"),
-            Some(Value::Str("héllo, yöu"))
-        );
-    }
-    // Open and migrate.
-    {
-        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env)
-            .expect("migrated");
-    }
-    // Verify that the database was indeed migrated.
-    {
-        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-        let store = dst_env
-            .open_single("store", StoreOptions::default())
-            .expect("opened");
-        let reader = dst_env.read().expect("reader");
-        assert_eq!(
-            store.get(&reader, "foo").expect("read"),
-            Some(Value::I64(1234))
-        );
-        assert_eq!(
-            store.get(&reader, "bar").expect("read"),
-            Some(Value::Bool(true))
-        );
-        assert_eq!(
-            store.get(&reader, "baz").expect("read"),
-            Some(Value::Str("héllo, yöu"))
-        );
-    }
-    // Check if the old files were deleted from disk.
-    {
-        let mut datamdb = root.path().to_path_buf();
-        let mut lockmdb = root.path().to_path_buf();
-        datamdb.push("data.mdb");
-        lockmdb.push("lock.mdb");
-        assert!(!datamdb.exists());
-        assert!(!lockmdb.exists());
-    }
-}
-
-#[test]
-fn test_open_migrator_safe_to_lmdb() {
-    let root = Builder::new()
-        .prefix("test_open_migrator_safe_to_lmdb")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    // Populate the source environment and persist it to disk.
-    {
-        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-        populate_store!(&src_env);
-        src_env.sync(true).expect("synced");
-    }
-    // Check if the files were written to disk.
-    {
-        let mut safebin = root.path().to_path_buf();
-        safebin.push("data.safe.bin");
-        assert!(safebin.exists());
-    }
-    // Verify that the database was written to disk.
-    {
-        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-        let store = src_env
-            .open_single("store", StoreOptions::default())
-            .expect("opened");
-        let reader = src_env.read().expect("reader");
-        assert_eq!(
-            store.get(&reader, "foo").expect("read"),
-            Some(Value::I64(1234))
-        );
-        assert_eq!(
-            store.get(&reader, "bar").expect("read"),
-            Some(Value::Bool(true))
-        );
-        assert_eq!(
-            store.get(&reader, "baz").expect("read"),
-            Some(Value::Str("héllo, yöu"))
-        );
-    }
-    // Open and migrate.
-    {
-        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env)
-            .expect("migrated");
-    }
-    // Verify that the database was indeed migrated.
-    {
-        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        let store = dst_env
-            .open_single("store", StoreOptions::default())
-            .expect("opened");
-        let reader = dst_env.read().expect("reader");
-        assert_eq!(
-            store.get(&reader, "foo").expect("read"),
-            Some(Value::I64(1234))
-        );
-        assert_eq!(
-            store.get(&reader, "bar").expect("read"),
-            Some(Value::Bool(true))
-        );
-        assert_eq!(
-            store.get(&reader, "baz").expect("read"),
-            Some(Value::Str("héllo, yöu"))
-        );
-    }
-    // Check if the old files were deleted from disk.
-    {
-        let mut safebin = root.path().to_path_buf();
-        safebin.push("data.safe.bin");
-        assert!(!safebin.exists());
-    }
-}
-
-#[test]
-fn test_open_migrator_round_trip() {
-    let root = Builder::new()
-        .prefix("test_open_migrator_round_trip")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    // Populate the source environment and persist it to disk.
-    {
-        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        populate_store!(&src_env);
-        src_env.sync(true).expect("synced");
-    }
-    // Open and migrate.
-    {
-        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env)
-            .expect("migrated");
-    }
-    // Open and migrate back.
-    {
-        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env)
-            .expect("migrated");
-    }
-    // Verify that the database was indeed migrated twice.
-    {
-        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-        let store = dst_env
-            .open_single("store", StoreOptions::default())
-            .expect("opened");
-        let reader = dst_env.read().expect("reader");
-        assert_eq!(
-            store.get(&reader, "foo").expect("read"),
-            Some(Value::I64(1234))
-        );
-        assert_eq!(
-            store.get(&reader, "bar").expect("read"),
-            Some(Value::Bool(true))
-        );
-        assert_eq!(
-            store.get(&reader, "baz").expect("read"),
-            Some(Value::Str("héllo, yöu"))
-        );
-    }
-    // Check if the right files are finally present on disk.
-    {
-        let mut datamdb = root.path().to_path_buf();
-        let mut lockmdb = root.path().to_path_buf();
-        let mut safebin = root.path().to_path_buf();
-        datamdb.push("data.mdb");
-        lockmdb.push("lock.mdb");
-        safebin.push("data.safe.bin");
-        assert!(datamdb.exists());
-        assert!(lockmdb.exists());
-        assert!(!safebin.exists());
-    }
-}
-
-#[test]
-fn test_easy_migrator_no_dir_1() {
-    let root = Builder::new()
-        .prefix("test_easy_migrator_no_dir")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    // This won't fail with IoError even though the path is a bogus path, because this
-    // is the "easy mode" migration, which automatically handles (ignores) this error.
-    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-    Migrator::easy_migrate_lmdb_to_safe_mode(Path::new("bogus"), &dst_env).expect("migrated");
-
-    let mut datamdb = root.path().to_path_buf();
-    let mut lockmdb = root.path().to_path_buf();
-    let mut safebin = root.path().to_path_buf();
-    datamdb.push("data.mdb");
-    lockmdb.push("lock.mdb");
-    safebin.push("data.safe.bin");
-    assert!(!datamdb.exists());
-    assert!(!lockmdb.exists());
-    assert!(!safebin.exists()); // SafeMode doesn't write an empty db to disk.
-}
-
-#[test]
-fn test_easy_migrator_no_dir_2() {
-    let root = Builder::new()
-        .prefix("test_easy_migrator_no_dir")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    // This won't fail with IoError even though the path is a bogus path, because this
-    // is the "easy mode" migration, which automatically handles (ignores) this error.
-    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    Migrator::easy_migrate_safe_mode_to_lmdb(Path::new("bogus"), &dst_env).expect("migrated");
-
-    let mut datamdb = root.path().to_path_buf();
-    let mut lockmdb = root.path().to_path_buf();
-    let mut safebin = root.path().to_path_buf();
-    datamdb.push("data.mdb");
-    lockmdb.push("lock.mdb");
-    safebin.push("data.safe.bin");
-    assert!(datamdb.exists()); // LMDB writes an empty db to disk.
-    assert!(lockmdb.exists());
-    assert!(!safebin.exists());
-}
-
-#[test]
-fn test_easy_migrator_invalid_1() {
-    let root = Builder::new()
-        .prefix("test_easy_migrator_invalid")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let dbfile = root.path().join("data.mdb");
-    fs::write(dbfile, "bogus").expect("dbfile created");
-
-    // This won't fail with FileInvalid even though the database is a bogus file, because this
-    // is the "easy mode" migration, which automatically handles (ignores) this error.
-    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");
-
-    let mut datamdb = root.path().to_path_buf();
-    let mut lockmdb = root.path().to_path_buf();
-    let mut safebin = root.path().to_path_buf();
-    datamdb.push("data.mdb");
-    lockmdb.push("lock.mdb");
-    safebin.push("data.safe.bin");
-    assert!(datamdb.exists()); // The corrupted db isn't deleted.
-    assert!(lockmdb.exists());
-    assert!(!safebin.exists());
-}
-
-#[test]
-fn test_easy_migrator_invalid_2() {
-    let root = Builder::new()
-        .prefix("test_easy_migrator_invalid")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let dbfile = root.path().join("data.safe.bin");
-    fs::write(dbfile, "bogus").expect("dbfile created");
-
-    // This won't fail with FileInvalid even though the database is a bogus file, because this
-    // is the "easy mode" migration, which automatically handles (ignores) this error.
-    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
-
-    let mut datamdb = root.path().to_path_buf();
-    let mut lockmdb = root.path().to_path_buf();
-    let mut safebin = root.path().to_path_buf();
-    datamdb.push("data.mdb");
-    lockmdb.push("lock.mdb");
-    safebin.push("data.safe.bin");
-    assert!(datamdb.exists()); // LMDB writes an empty db to disk.
-    assert!(lockmdb.exists());
-    assert!(safebin.exists()); // The corrupted db isn't deleted.
-}
-
-#[test]
-#[should_panic(expected = "migrated: SourceEmpty")]
-fn test_migrator_lmdb_to_safe_1() {
-    let root = Builder::new()
-        .prefix("test_migrate_lmdb_to_safe")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
-}
-
-#[test]
-#[should_panic(expected = "migrated: DestinationNotEmpty")]
-fn test_migrator_lmdb_to_safe_2() {
-    let root = Builder::new()
-        .prefix("test_migrate_lmdb_to_safe")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    populate_store!(&src_env);
-    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-    populate_store!(&dst_env);
-    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
-}
-
-#[test]
-fn test_migrator_lmdb_to_safe_3() {
-    let root = Builder::new()
-        .prefix("test_migrate_lmdb_to_safe")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    populate_store!(&src_env);
-    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
-
-    let store = dst_env
-        .open_single("store", StoreOptions::default())
-        .expect("opened");
-    let reader = dst_env.read().expect("reader");
-    assert_eq!(
-        store.get(&reader, "foo").expect("read"),
-        Some(Value::I64(1234))
-    );
-    assert_eq!(
-        store.get(&reader, "bar").expect("read"),
-        Some(Value::Bool(true))
-    );
-    assert_eq!(
-        store.get(&reader, "baz").expect("read"),
-        Some(Value::Str("héllo, yöu"))
-    );
-}
-
-#[test]
-#[should_panic(expected = "migrated: SourceEmpty")]
-fn test_migrator_safe_to_lmdb_1() {
-    let root = Builder::new()
.prefix("test_migrate_safe_to_lmdb") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let src_env = Rkv::new::(root.path()).expect("new succeeded"); - let dst_env = Rkv::new::(root.path()).expect("new succeeded"); - Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); -} - -#[test] -#[should_panic(expected = "migrated: DestinationNotEmpty")] -fn test_migrator_safe_to_lmdb_2() { - let root = Builder::new() - .prefix("test_migrate_safe_to_lmdb") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let src_env = Rkv::new::(root.path()).expect("new succeeded"); - populate_store!(&src_env); - let dst_env = Rkv::new::(root.path()).expect("new succeeded"); - populate_store!(&dst_env); - Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); -} - -#[test] -fn test_migrator_safe_to_lmdb_3() { - let root = Builder::new() - .prefix("test_migrate_safe_to_lmdb") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let src_env = Rkv::new::(root.path()).expect("new succeeded"); - populate_store!(&src_env); - let dst_env = Rkv::new::(root.path()).expect("new succeeded"); - Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated"); - - let store = dst_env - .open_single("store", StoreOptions::default()) - .expect("opened"); - let reader = dst_env.read().expect("reader"); - assert_eq!( - store.get(&reader, "foo").expect("read"), - Some(Value::I64(1234)) - ); - assert_eq!( - store.get(&reader, "bar").expect("read"), - Some(Value::Bool(true)) - ); - assert_eq!( - store.get(&reader, "baz").expect("read"), - Some(Value::Str("héllo, yöu")) - ); -} - -#[test] -fn test_easy_migrator_failed_migration_1() { - let root = Builder::new() - .prefix("test_easy_migrator_failed_migration_1") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let dbfile = root.path().join("data.mdb"); - fs::write(&dbfile, "bogus").expect("bogus dbfile created"); - - // This won't fail with FileInvalid even though the database is a bogus file, because this - // is the "easy mode" migration which automatically handles (ignores) this error. - let dst_env = Rkv::new::(root.path()).expect("new succeeded"); - Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated"); - - // Populate destination environment and persist to disk. - populate_store!(&dst_env); - dst_env.sync(true).expect("synced"); - - // Delete bogus file and create a valid source environment in its place. - fs::remove_file(&dbfile).expect("bogus dbfile removed"); - let src_env = Rkv::new::(root.path()).expect("new succeeded"); - populate_store!(&src_env); - src_env.sync(true).expect("synced"); - - // Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty. - Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated"); -} - -#[test] -fn test_easy_migrator_failed_migration_2() { - let root = Builder::new() - .prefix("test_easy_migrator_failed_migration_2") - .tempdir() - .expect("tempdir"); - fs::create_dir_all(root.path()).expect("dir created"); - - let dbfile = root.path().join("data.safe.bin"); - fs::write(&dbfile, "bogus").expect("bogus dbfile created"); - - // This won't fail with FileInvalid even though the database is a bogus file, because this - // is the "easy mode" migration which automatically handles (ignores) this error. 
-    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
-    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
-
-    // Populate the destination environment and persist it to disk.
-    populate_store!(&dst_env);
-    dst_env.sync(true).expect("synced");
-
-    // Delete the bogus file and create a valid source environment in its place.
-    fs::remove_file(&dbfile).expect("bogus dbfile removed");
-    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
-    populate_store!(&src_env);
-    src_env.sync(true).expect("synced");
-
-    // Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty.
-    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
-}
-
-fn test_easy_migrator_from_manager_failed_migration_1() {
-    let root = Builder::new()
-        .prefix("test_easy_migrator_from_manager_failed_migration_1")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    {
-        let mut src_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
-        let created_src_arc = src_manager
-            .get_or_create(root.path(), Rkv::new::<Lmdb>)
-            .unwrap();
-        let src_env = created_src_arc.read().unwrap();
-        populate_store!(&src_env);
-        src_env.sync(true).expect("synced");
-    }
-    {
-        let mut dst_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
-        let created_dst_arc_1 = dst_manager
-            .get_or_create(root.path(), Rkv::new::<SafeMode>)
-            .unwrap();
-        let dst_env_1 = created_dst_arc_1.read().unwrap();
-        populate_store!(&dst_env_1);
-        dst_env_1.sync(true).expect("synced");
-    }
-
-    // Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty.
-    let dst_manager = Manager::<SafeModeEnvironment>::singleton().read().unwrap();
-    let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap();
-    let dst_env_2 = created_dst_arc_2.read().unwrap();
-    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), dst_env_2).expect("migrated");
-}
-
-fn test_easy_migrator_from_manager_failed_migration_2() {
-    let root = Builder::new()
-        .prefix("test_easy_migrator_from_manager_failed_migration_2")
-        .tempdir()
-        .expect("tempdir");
-    fs::create_dir_all(root.path()).expect("dir created");
-
-    {
-        let mut src_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
-        let created_src_arc = src_manager
-            .get_or_create(root.path(), Rkv::new::<SafeMode>)
-            .unwrap();
-        let src_env = created_src_arc.read().unwrap();
-        populate_store!(&src_env);
-        src_env.sync(true).expect("synced");
-    }
-    {
-        let mut dst_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
-        let created_dst_arc_1 = dst_manager
-            .get_or_create(root.path(), Rkv::new::<Lmdb>)
-            .unwrap();
-        let dst_env_1 = created_dst_arc_1.read().unwrap();
-        populate_store!(&dst_env_1);
-        dst_env_1.sync(true).expect("synced");
-    }
-
-    // Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty.
-    let dst_manager = Manager::<LmdbEnvironment>::singleton().read().unwrap();
-    let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap();
-    let dst_env_2 = created_dst_arc_2.read().unwrap();
-    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), dst_env_2).expect("migrated");
-}
-
-// The two scenarios above share the process-wide Manager singletons, so they
-// run sequentially from a single #[test] rather than as independent tests.
-#[test]
-fn test_easy_migrator_from_manager_failed_migration() {
-    test_easy_migrator_from_manager_failed_migration_1();
-    test_easy_migrator_from_manager_failed_migration_2();
-}
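
With the LMDB tests and both `Migrator` directions deleted, the remaining suite exercises only the SafeMode backend. For reference, here is a minimal sketch of the SafeMode round trip that survives this change, built solely from APIs that appear in the deleted tests above (the function name and tempdir prefix are illustrative, not part of the diff):

```rust
use rkv::{backend::SafeMode, Rkv, StoreOptions, Value};
use tempfile::Builder;

fn main() {
    // Create a throwaway directory for the environment, as the tests do.
    let root = Builder::new()
        .prefix("safe_mode_round_trip")
        .tempdir()
        .expect("tempdir");

    // SafeMode is now the only built-in backend.
    let env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    let store = env
        .open_single("store", StoreOptions::create())
        .expect("opened");

    // Write a value; SafeMode persists to disk synchronously on commit.
    let mut writer = env.write().expect("writer");
    store
        .put(&mut writer, "foo", &Value::I64(1234))
        .expect("wrote");
    writer.commit().expect("committed");

    // Read the value back.
    let reader = env.read().expect("reader");
    assert_eq!(
        store.get(&reader, "foo").expect("read"),
        Some(Value::I64(1234))
    );
}
```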