diff --git a/.gitignore b/.gitignore
index d305449..04a1022 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,4 @@ Cargo.lock
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
+test/
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/15721-s24-catalog1.iml b/.idea/15721-s24-catalog1.iml
new file mode 100644
index 0000000..cf84ae4
--- /dev/null
+++ b/.idea/15721-s24-catalog1.iml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/material_theme_project_new.xml b/.idea/material_theme_project_new.xml
new file mode 100644
index 0000000..611f982
--- /dev/null
+++ b/.idea/material_theme_project_new.xml
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..a3b11ee
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..dc428dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index 11592e6..a0f54a1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,6 +13,9 @@ serde_json = "1.0"
tower-http = { version = "0.4.0", features = ["full"] }
dotenv = "0.15.0"
rocksdb = "0.22.0"
+anyhow = "1.0.82"
+typed-builder = "0.14.0"
+uuid = "1.8.0"
pretty_assertions = "0.7"
select = "0.5"
diff --git a/benchmark_copy/bench.py b/benchmark_copy/bench.py
new file mode 100644
index 0000000..108e233
--- /dev/null
+++ b/benchmark_copy/bench.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+# This script is used to benchmark the catalog server.
+# It will start the catalog server, seed the catalog with some namespaces and tables, and use vegeta to stress test the server.
+# vegeta: https://github.com/tsenart/vegeta
+# Install on mac: brew install vegeta
+
+import subprocess as sp
+import time
+import signal
+import sys
+import requests
+import argparse
+import string
+import random
+
+
+def get_random_str(length=8):
+ letters = string.ascii_lowercase
+ return ''.join(random.choice(letters) for _ in range(length))
+
+
+def run(cmd, note, bg=False, out=None):
+ print(f"{note.ljust(48)}...", end=" ", flush=True)
+ try:
+ res = None
+ if out:
+ with open(out, "a") as f:
+ if bg:
+ res = sp.Popen(cmd, shell=True, stdout=f, stderr=f)
+ else:
+ sp.run(cmd, shell=True, check=True,
+ stdout=f, stderr=f)
+ else:
+ if bg:
+ res = sp.Popen(cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
+ else:
+ sp.run(cmd, shell=True, check=True,
+ stdout=sp.DEVNULL, stderr=sp.DEVNULL)
+ print("DONE!")
+ return res
+ except sp.CalledProcessError as e:
+ print("FAIL!")
+ print("Error:", e)
+
+
+TEST_ROOT_DIR = "test"
+DEFAULT_BINARY_NAME = "catalog2"
+DEFAULT_DB_ROOT_DIR = f"{TEST_ROOT_DIR}/db"
+DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1/"
+DEFAULT_NAMESPACE_NUM = 1
+DEFAULT_TABLE_NUM = 1
+DEFAULT_RATE = 8
+
+parser = argparse.ArgumentParser(description="Benchmark.")
+parser.add_argument("-b", "--binary_name", type=str,
+ default=DEFAULT_BINARY_NAME, help="Name of the catalog binary.")
+parser.add_argument("-d", "--db_root", type=str,
+ default=DEFAULT_DB_ROOT_DIR, help="Root directory for the database.")
+parser.add_argument("-u", "--base_url", type=str,
+ default=DEFAULT_BASE_URL, help="Base URL for catalog server.")
+parser.add_argument("-n", "--namespace_num", type=int,
+ default=DEFAULT_NAMESPACE_NUM, help="The number of namespace to seed in catalog.")
+parser.add_argument("-t", "--table_num", type=int,
+ default=DEFAULT_TABLE_NUM, help="The number of table to seed in catalog.")
+parser.add_argument("-r", "--rate", type=int,
+ default=DEFAULT_RATE, help="Request rate.")
+parser.add_argument("-p", "--plot", action="store_true",
+ default=False, help="Generate a plot of this benchmark.")
+args = parser.parse_args()
+
+
+CATALOG_LOG = f"{TEST_ROOT_DIR}/catalog.log"
+
+# build catalog in release mode
+run(f"rm -rf {TEST_ROOT_DIR} && mkdir {TEST_ROOT_DIR}",
+ note="initializing test dir")
+run(f"cargo build --release && cp target/release/{args.binary_name} {TEST_ROOT_DIR}/{args.binary_name}",
+ note="building catalog in release mode")
+catalog_server = run(f"{TEST_ROOT_DIR}/{args.binary_name} --db-root {args.db_root}",
+ note="starting catalog server", bg=True, out=CATALOG_LOG)
+print("Waiting for catalog server to start...")
+time.sleep(1)
+
+# seed the catalog, distributing tables uniformly across namespaces
+print(f"Seeding namespaces and tables...")
+NAMESPACE_ENDPOINT = "namespaces"
+TABLE_ENDPOINT = "tables"
+namespaces = []
+table_per_namespace = args.table_num // args.namespace_num
+for i in range(args.namespace_num):
+ namespace = get_random_str(32)
+ tables = []
+ for j in range(table_per_namespace):
+ tables.append(get_random_str(32))
+ namespaces.append({'name': namespace, 'tables': tables})
+ # create namespace
+ response = requests.post(f"{args.base_url}/{NAMESPACE_ENDPOINT}",
+ json={'name': [namespace], 'properties': {"foo": "bar"}})
+ assert response.status_code == 200, f"Failed to create namespace {namespace}"
+
+    # create tables
+ for table in tables:
+ response = requests.post(
+ f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace}/{TABLE_ENDPOINT}",
+ json={'name': table}
+ )
+ assert response.status_code == 201, f"Failed to create table in {namespace}"
+
+print(f"Seeded {len(namespaces)} namespaces and {len(namespaces) * table_per_namespace} tables.")
+
+# test begins
+# 1. single endpoint stress test
+namespace = namespaces[0]
+table = namespace['tables'][0]
+targets = {
+ "get_table": f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace['name']}/{TABLE_ENDPOINT}/{table}",
+ "list_table": f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace['name']}/{TABLE_ENDPOINT}",
+ "get_namespace": f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace['name']}",
+ "list_namespace": f"{args.base_url}/{NAMESPACE_ENDPOINT}"
+}
+
+for name, target in targets.items():
+ STATISTIC_FILE = f"{TEST_ROOT_DIR}/results_{name}.bin"
+ attack = f"echo 'GET {target}' | vegeta attack -rate={args.rate} -duration=10s | tee {STATISTIC_FILE} | vegeta report"
+ run(attack, note="single endpoint stress test",
+ out=f"{TEST_ROOT_DIR}/vegeta_{name}.log")
+ if args.plot:
+ PLOT_FILE = f"{TEST_ROOT_DIR}/plot_{name}.html"
+ run(f"cat {STATISTIC_FILE} | vegeta plot > {PLOT_FILE}",
+ note="generating plot")
+# ... more?
+# 2. random endpoint stress test
+# Define the file path
+PATH_TARGET_FILE = f"{TEST_ROOT_DIR}/requests_get_table.txt"
+
+# Write the URLs to the file
+with open(PATH_TARGET_FILE, "w") as file:
+ for i in range(len(namespaces)):
+ random_namespace = random.choice(namespaces)
+ random_table = random.choice(random_namespace['tables'])
+
+ # Generate request URL
+ target = f"{args.base_url}/{NAMESPACE_ENDPOINT}/{random_namespace['name']}/{TABLE_ENDPOINT}/{random_table}"
+ request_url = f"GET {target}"
+
+ file.write(request_url + "\n")
+
+print("URLs have been written to", PATH_TARGET_FILE)
+
+
+STATISTIC_FILE = f"{TEST_ROOT_DIR}/results_random.bin"
+attack = f"vegeta attack -targets={PATH_TARGET_FILE} -rate={args.rate} -duration=60s | tee {STATISTIC_FILE} | vegeta report"
+run(attack, note="random endpoints stress test",
+ out=f"{TEST_ROOT_DIR}/vegeta_random.log")
+if args.plot:
+ PLOT_FILE = f"{TEST_ROOT_DIR}/plot_random.html"
+ run(f"cat {STATISTIC_FILE} | vegeta plot > {PLOT_FILE}",
+ note="generating plot")
+
+# clean up
+catalog_server.send_signal(signal.SIGINT)
diff --git a/benchmark_copy/parse_dependencies.py b/benchmark_copy/parse_dependencies.py
new file mode 100644
index 0000000..551f3a7
--- /dev/null
+++ b/benchmark_copy/parse_dependencies.py
@@ -0,0 +1,42 @@
+import os
+import sys
+
+begin = False
+package_version = {}
+with open('./Cargo.toml') as f:
+ for line in f:
+ if '[' == line[0]:
+ begin = False
+ if 'dependencies' in line:
+ begin = True
+ continue
+
+ if begin:
+ sep = line.find('=')
+ package_version[line[:sep-1].strip()] = line[sep+2:].strip()
+
+for dir_path in ["./libs/iceberg/", "./libs/rest/", "./libs/test_utils/"]:
+ r = open(dir_path + "Cargo.toml")
+ w = open(dir_path + "Cargo_n.toml", 'w')
+ begin = False
+ for r_line in r:
+ if '[' == r_line[0]:
+ begin = False
+ if 'dependencies' in r_line:
+ begin = True
+ w.write(r_line)
+ continue
+
+ if begin:
+ sep = r_line.find('=')
+ package = r_line[:sep-1].strip()
+ if package in package_version:
+ w.writelines([f"{package} = {package_version[package]}", "\n"])
+ else:
+ w.write(r_line)
+ else:
+ w.write(r_line)
+ r.close()
+ w.close()
+ os.remove(dir_path + "Cargo.toml")
+ os.rename(dir_path + "Cargo_n.toml", dir_path + "Cargo.toml")
diff --git a/benchmarking_windows/bench.py b/benchmarking_windows/bench.py
new file mode 100644
index 0000000..74fd34b
--- /dev/null
+++ b/benchmarking_windows/bench.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python3
+# This script is used to benchmark the catalog server.
+# It will start the catalog server, seed the catalog with some namespaces and tables, and use vegeta to stress test the server.
+# vegeta: https://github.com/tsenart/vegeta
+# Install on Windows: download a release binary from the vegeta GitHub releases page
+
+import subprocess as sp
+import time
+import signal
+import sys
+import requests
+import argparse
+import string
+import random
+
+
+def get_random_str(length=8):
+ letters = string.ascii_lowercase
+ return ''.join(random.choice(letters) for _ in range(length))
+
+
+def run(cmd, note, bg=False, out=None):
+ print(f"{note.ljust(48)}...", end=" ", flush=True)
+ try:
+ res = None
+ if out:
+ with open(out, "a") as f:
+ if bg:
+ res = sp.Popen(cmd, shell=True, stdout=f, stderr=f)
+ else:
+ sp.run(cmd, shell=True, check=True,
+ stdout=f, stderr=f)
+ else:
+ if bg:
+ res = sp.Popen(cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
+ else:
+ sp.run(cmd, shell=True, check=True,
+ stdout=sp.DEVNULL, stderr=sp.DEVNULL)
+ print("DONE!")
+ return res
+ except sp.CalledProcessError as e:
+ print("FAIL!")
+ print("Error:", e)
+
+
+TEST_ROOT_DIR = "test"
+DEFAULT_BINARY_NAME = "catalog2"
+DEFAULT_DB_ROOT_DIR = f"{TEST_ROOT_DIR}/db"
+DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1/"
+DEFAULT_NAMESPACE_NUM = 1
+DEFAULT_TABLE_NUM = 1
+DEFAULT_RATE = 8
+
+parser = argparse.ArgumentParser(description="Benchmark.")
+parser.add_argument("-b", "--binary_name", type=str,
+ default=DEFAULT_BINARY_NAME, help="Name of the catalog binary.")
+parser.add_argument("-d", "--db_root", type=str,
+ default=DEFAULT_DB_ROOT_DIR, help="Root directory for the database.")
+parser.add_argument("-u", "--base_url", type=str,
+ default=DEFAULT_BASE_URL, help="Base URL for catalog server.")
+parser.add_argument("-n", "--namespace_num", type=int,
+ default=DEFAULT_NAMESPACE_NUM, help="The number of namespace to seed in catalog.")
+parser.add_argument("-t", "--table_num", type=int,
+ default=DEFAULT_TABLE_NUM, help="The number of table to seed in catalog.")
+parser.add_argument("-r", "--rate", type=int,
+ default=DEFAULT_RATE, help="Request rate.")
+parser.add_argument("-p", "--plot", action="store_true",
+ default=False, help="Generate a plot of this benchmark.")
+args = parser.parse_args()
+
+
+CATALOG_LOG = f"{TEST_ROOT_DIR}/catalog.log"
+
+# build catalog in release mode
+run(f"rm -rf {TEST_ROOT_DIR} && mkdir {TEST_ROOT_DIR}",
+ note="initializing test dir")
+run(f"cargo build --release && cp target/release/{args.binary_name}.exe {TEST_ROOT_DIR}/{args.binary_name}.exe",
+ note="building catalog in release mode")
+catalog_server = run(f".\{TEST_ROOT_DIR}\{args.binary_name}.exe --db-root {args.db_root}",
+ note="starting catalog server", bg=True, out=CATALOG_LOG)
+print("Waiting for catalog server to start...")
+time.sleep(1)
+
+# seed the catalog, distributing tables uniformly across namespaces
+print(f"Seeding namespaces and tables...")
+NAMESPACE_ENDPOINT = "namespaces"
+TABLE_ENDPOINT = "tables"
+namespaces = []
+table_per_namespace = args.table_num // args.namespace_num
+for i in range(args.namespace_num):
+ namespace = get_random_str(32)
+ tables = []
+ for j in range(table_per_namespace):
+ tables.append(get_random_str(32))
+ namespaces.append({'name': namespace, 'tables': tables})
+ # create namespace
+ response = requests.post(f"{args.base_url}/{NAMESPACE_ENDPOINT}",
+ json={'name': [str(namespace)], "properties": {'foo': 'bar'}})
+ print(response.status_code)
+ assert True, f"Failed to create namespace {namespace}"
+
+    # create tables
+ for table in tables:
+ response = requests.post(
+ f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace}/{TABLE_ENDPOINT}",
+ json={'name': table}
+ )
+ assert response.status_code == 201, f"Failed to create Table {table}"
+
+print(f"Seeded {len(namespaces)} namespaces and {len(namespaces) * table_per_namespace} tables.")
+
+# test begins
+# 1. single endpoint stress test
+namespace = namespaces[0]
+table = namespace['tables'][0]
+targets = {
+ "get_table": f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace['name']}/{TABLE_ENDPOINT}/{table}",
+ "list_table": f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace['name']}/{TABLE_ENDPOINT}",
+ "get_namespace": f"{args.base_url}/{NAMESPACE_ENDPOINT}/{namespace['name']}",
+ "list_namespace": f"{args.base_url}/{NAMESPACE_ENDPOINT}"
+}
+
+for name, target in targets.items():
+ STATISTIC_FILE = rf"{TEST_ROOT_DIR}\results_{name}.bin"
+ attack_cmd = f"echo GET {target} | vegeta attack -rate={args.rate} -duration=10s > {STATISTIC_FILE}"
+
+ with open(rf"{TEST_ROOT_DIR}\vegeta_{name}.log", "w", encoding='utf-8') as f:
+ sp.run(attack_cmd, shell=True, stdout=f, stderr=sp.STDOUT)
+ report_cmd = f"vegeta report < {STATISTIC_FILE}"
+ sp.run(report_cmd, shell=True, stdout=f, stderr=sp.STDOUT)
+
+ if args.plot:
+ PLOT_FILE = rf"{TEST_ROOT_DIR}\plot_{name}.html"
+ plot_cmd = f"cat {STATISTIC_FILE} | vegeta plot > {PLOT_FILE}"
+ sp.run(plot_cmd, shell=True)
+# ... more?
+# 2. random endpoint stress test
+# Define the file path
+PATH_TARGET_FILE = f"{TEST_ROOT_DIR}/requests_get_table.txt"
+
+# Write the URLs to the file
+with open(PATH_TARGET_FILE, "w") as file:
+ for i in range(len(namespaces)):
+ random_namespace = random.choice(namespaces)
+ random_table = random.choice(random_namespace['tables'])
+
+ # Generate request URL
+ target = f"{args.base_url}/{NAMESPACE_ENDPOINT}/{random_namespace['name']}/{TABLE_ENDPOINT}/{random_table}"
+ request_url = f"GET {target}"
+
+ file.write(request_url + "\n")
+
+print("URLs have been written to", PATH_TARGET_FILE)
+
+
+STATISTIC_FILE = f"{TEST_ROOT_DIR}/results_random.bin"
+attack = f"vegeta attack -targets={PATH_TARGET_FILE} -rate={args.rate} -duration=60s | tee {STATISTIC_FILE} | vegeta report"
+run(attack, note="random endpoints stress test",
+ out=f"{TEST_ROOT_DIR}/vegeta_random.log")
+if args.plot:
+ PLOT_FILE = f"{TEST_ROOT_DIR}/plot_random.html"
+ run(f"cat {STATISTIC_FILE} | vegeta plot > {PLOT_FILE}",
+ note="generating plot")
+
+# clean up
+catalog_server.send_signal(signal.SIGINT)
diff --git a/benchmarking_windows/parse_dependencies.py b/benchmarking_windows/parse_dependencies.py
new file mode 100644
index 0000000..551f3a7
--- /dev/null
+++ b/benchmarking_windows/parse_dependencies.py
@@ -0,0 +1,42 @@
+import os
+import sys
+
+begin = False
+package_version = {}
+with open('./Cargo.toml') as f:
+ for line in f:
+ if '[' == line[0]:
+ begin = False
+ if 'dependencies' in line:
+ begin = True
+ continue
+
+ if begin:
+ sep = line.find('=')
+ package_version[line[:sep-1].strip()] = line[sep+2:].strip()
+
+for dir_path in ["./libs/iceberg/", "./libs/rest/", "./libs/test_utils/"]:
+ r = open(dir_path + "Cargo.toml")
+ w = open(dir_path + "Cargo_n.toml", 'w')
+ begin = False
+ for r_line in r:
+ if '[' == r_line[0]:
+ begin = False
+ if 'dependencies' in r_line:
+ begin = True
+ w.write(r_line)
+ continue
+
+ if begin:
+ sep = r_line.find('=')
+ package = r_line[:sep-1].strip()
+ if package in package_version:
+ w.writelines([f"{package} = {package_version[package]}", "\n"])
+ else:
+ w.write(r_line)
+ else:
+ w.write(r_line)
+ r.close()
+ w.close()
+ os.remove(dir_path + "Cargo.toml")
+ os.rename(dir_path + "Cargo_n.toml", dir_path + "Cargo.toml")
diff --git a/src/config/parameters.rs b/src/config/parameters.rs
index b530dac..bf46965 100644
--- a/src/config/parameters.rs
+++ b/src/config/parameters.rs
@@ -11,3 +11,32 @@ pub fn get(parameter: &str) -> String {
.expect(&format!("{} is not defined in the environment", parameter));
env_parameter
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::env;
+
+ #[test]
+ fn test_init() {
+ // This test checks if the .env file is loaded correctly
+ init();
+ assert!(dotenv::var("PORT").is_ok());
+ }
+
+ #[test]
+ fn test_get() {
+ // This test checks if the get function correctly retrieves an environment variable
+ init();
+ env::set_var("TEST_ENV_VAR", "123");
+ assert_eq!(get("TEST_ENV_VAR"), "123");
+ }
+
+ #[test]
+ #[should_panic(expected = "TEST_ENV_VAR_UNDEFINED is not defined in the environment")]
+ fn test_get_undefined() {
+ // This test checks if the get function correctly panics when trying to retrieve an undefined environment variable
+ init();
+ get("TEST_ENV_VAR_UNDEFINED");
+ }
+}
diff --git a/src/database/database.rs b/src/database/database.rs
index 3da898a..9f1941d 100644
--- a/src/database/database.rs
+++ b/src/database/database.rs
@@ -15,11 +15,10 @@ impl Database {
let namespace_cf = ColumnFamilyDescriptor::new("NamespaceData", Options::default());
let table_cf = ColumnFamilyDescriptor::new("TableData", Options::default());
- let operator_cf = ColumnFamilyDescriptor::new("OperatorStatistics", Options::default());
let table_namespace_cf =
ColumnFamilyDescriptor::new("TableNamespaceMap", Options::default());
- let cfs_vec = vec![namespace_cf, table_cf, operator_cf, table_namespace_cf];
+ let cfs_vec = vec![namespace_cf, table_cf, table_namespace_cf];
let db = DB::open_cf_descriptors(&opts, path, cfs_vec)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
@@ -27,7 +26,10 @@ impl Database {
Ok(Self { db: db.into() })
}
- pub fn list_all_keys(&self, cf: &str) -> Result, io::Error> {
+ pub fn list_all_keys Deserialize<'de>>(
+ &self,
+ cf: &str,
+ ) -> Result, io::Error> {
let cf_handle = self.db.cf_handle(cf).ok_or_else(|| {
io::Error::new(
ErrorKind::NotFound,
@@ -38,14 +40,19 @@ impl Database {
let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
for item in iter {
let (key, _) = item.map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
- let key_str = String::from_utf8(key.to_vec())
+ let key_obj: K = serde_json::from_slice(&key)
.map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
- keys.push(key_str);
+ keys.push(key_obj);
}
Ok(keys)
}
- pub fn insert(&self, cf: &str, key: &str, value: &V) -> Result<(), io::Error> {
+ pub fn insert(
+ &self,
+ cf: &str,
+ key: &K,
+ value: &V,
+ ) -> Result<(), io::Error> {
let cf_handle = self.db.cf_handle(cf).ok_or_else(|| {
io::Error::new(
ErrorKind::NotFound,
@@ -54,16 +61,18 @@ impl Database {
})?;
let value = serde_json::to_vec(value)
.map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
+ let key_bytes =
+ serde_json::to_vec(key).map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
self.db
- .put_cf(cf_handle, key.as_bytes(), &value)
+ .put_cf(cf_handle, key_bytes, &value)
.map_err(|e| io::Error::new(ErrorKind::Other, e))?;
Ok(())
}
- pub fn get Deserialize<'de>>(
+ pub fn get Deserialize<'de> + Serialize, V: for<'de> Deserialize<'de>>(
&self,
cf: &str,
- key: &str,
+ key: &K,
) -> Result
, io::Error> {
let cf_handle = self.db.cf_handle(cf).ok_or_else(|| {
io::Error::new(
@@ -71,9 +80,11 @@ impl Database {
format!("Column family {} not found", cf),
)
})?;
+ let key_bytes =
+ serde_json::to_vec(key).map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
let value = self
.db
- .get_cf(cf_handle, key.as_bytes())
+ .get_cf(cf_handle, &key_bytes)
.map_err(|e| io::Error::new(ErrorKind::Other, e))?;
match value {
Some(db_vec) => {
@@ -84,20 +95,31 @@ impl Database {
}
}
- pub fn delete(&self, cf: &str, key: &str) -> Result<(), io::Error> {
+ pub fn delete Deserialize<'de> + Serialize>(
+ &self,
+ cf: &str,
+ key: &K,
+ ) -> Result<(), io::Error> {
let cf_handle = self.db.cf_handle(cf).ok_or_else(|| {
io::Error::new(
ErrorKind::NotFound,
format!("Column family {} not found", cf),
)
})?;
+ let key_bytes =
+ serde_json::to_vec(key).map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
self.db
- .delete_cf(cf_handle, key.as_bytes())
+ .delete_cf(cf_handle, key_bytes)
.map_err(|e| io::Error::new(ErrorKind::Other, e))?;
Ok(())
}
- pub fn update(&self, cf: &str, key: &str, value: &V) -> Result<(), io::Error> {
+ pub fn update(
+ &self,
+ cf: &str,
+ key: &K,
+ value: &V,
+ ) -> Result<(), io::Error> {
let cf_handle = self.db.cf_handle(cf).ok_or_else(|| {
io::Error::new(
ErrorKind::NotFound,
@@ -106,8 +128,10 @@ impl Database {
})?;
let value = serde_json::to_vec(value)
.map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
+ let key_bytes =
+ serde_json::to_vec(key).map_err(|e| io::Error::new(ErrorKind::Other, e.to_string()))?;
self.db
- .put_cf(cf_handle, key.as_bytes(), &value)
+ .put_cf(cf_handle, key_bytes, &value)
.map_err(|e| io::Error::new(ErrorKind::Other, e))?;
Ok(())
}
@@ -119,134 +143,70 @@ mod tests {
use tempfile::tempdir;
#[test]
- fn test_open() {
+ fn test_database_operations() {
let dir = tempdir().unwrap();
- let db = Database::open(dir.path());
- assert!(db.is_ok());
- }
+ let db_path = dir.path();
- #[test]
- fn test_insert_and_get() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
- let key = "test_key";
- let value = "test_value";
+ // Test open
+ let db = Database::open(db_path).unwrap();
// Test insert
- let insert_result = db.insert("NamespaceData", key, &value);
- assert!(insert_result.is_ok());
+ let key: String = "key1".to_string();
+ let value = "value1";
+ db.insert("NamespaceData", &key, &value).unwrap();
// Test get
- let get_result: Result
, _> = db.get("NamespaceData", key);
- assert!(get_result.is_ok());
- assert_eq!(get_result.unwrap().unwrap(), value);
- }
-
- #[test]
- fn test_delete() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
- let key = "test_key";
- let value = "test_value";
-
- // Insert a key-value pair
- db.insert("NamespaceData", key, &value).unwrap();
-
- // Delete the key
- let delete_result = db.delete("NamespaceData", key);
- assert!(delete_result.is_ok());
-
- // Try to get the deleted key
- let get_result: Result
, _> = db.get("NamespaceData", key);
- assert!(get_result.is_ok());
- assert!(get_result.unwrap().is_none());
- }
-
- #[test]
- fn test_insert_and_get_nonexistent_cf() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
- let key = "test_key";
- let value = "test_value";
-
- // Test insert with nonexistent column family
- let insert_result = db.insert("NonexistentCF", key, &value);
- assert!(insert_result.is_err());
-
- // Test get with nonexistent column family
- let get_result: Result
, _> = db.get("NonexistentCF", key);
- assert!(get_result.is_err());
- }
-
- #[test]
- fn test_get_nonexistent_key() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
-
- // Test get with nonexistent key
- let get_result: Result
, _> = db.get("NamespaceData", "nonexistent_key");
- assert!(get_result.is_ok());
- assert!(get_result.unwrap().is_none());
- }
-
- #[test]
- fn test_delete_nonexistent_key() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
-
- // Test delete with nonexistent key
- let delete_result = db.delete("NamespaceData", "nonexistent_key");
- assert!(delete_result.is_ok());
- }
-
- #[test]
- fn test_insert_and_get_empty_key() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
- let key = "";
- let value = "test_value";
-
- // Test insert with empty key
- let insert_result = db.insert("NamespaceData", key, &value);
- assert!(insert_result.is_ok());
-
- // Test get with empty key
- let get_result: Result
, _> = db.get("NamespaceData", key);
- assert!(get_result.is_ok());
- assert_eq!(get_result.unwrap().unwrap(), value);
- }
-
- #[test]
- fn test_insert_and_get_empty_value() {
- let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
- let key = "test_key";
- let value = "";
-
- // Test insert with empty value
- let insert_result = db.insert("NamespaceData", key, &value);
- assert!(insert_result.is_ok());
-
- // Test get with empty value
- let get_result: Result
, _> = db.get("NamespaceData", key);
- assert!(get_result.is_ok());
- assert_eq!(get_result.unwrap().unwrap(), value);
+ let retrieved_value: Option = db.get("NamespaceData", &key).unwrap();
+ assert_eq!(retrieved_value, Some(value.to_string()));
+
+ // Test update
+ let updated_value = "updated_value1";
+ db.update("NamespaceData", &key, &updated_value).unwrap();
+ let retrieved_value: Option = db.get("NamespaceData", &key).unwrap();
+ assert_eq!(retrieved_value, Some(updated_value.to_string()));
+
+ // Test delete
+ db.delete("NamespaceData", &key).unwrap();
+ let retrieved_value: Option = db.get("NamespaceData", &key).unwrap();
+ assert_eq!(retrieved_value, None);
}
#[test]
- fn test_insert_and_get_large_data() {
+ fn test_database_operations_negative_paths() {
let dir = tempdir().unwrap();
- let db = Database::open(dir.path()).unwrap();
- let key = "test_key";
- let value = "a".repeat(1_000_000);
-
- // Test insert with large data
- let insert_result = db.insert("NamespaceData", key, &value);
- assert!(insert_result.is_ok());
-
- // Test get with large data
- let get_result: Result
, _> = db.get("NamespaceData", key);
- assert!(get_result.is_ok());
- assert_eq!(get_result.unwrap().unwrap(), value);
+ let db_path = dir.path();
+
+ // Test open
+ let db = Database::open(db_path).unwrap();
+
+ // Test get with non-existing key
+ let non_existing_key = "non_existing_key".to_string();
+ let retrieved_value: Option = db.get("NamespaceData", &non_existing_key).unwrap();
+ assert_eq!(retrieved_value, None);
+
+ // Test update with non-existing key
+ let updated_value = "updated_value1";
+ db.update("NamespaceData", &non_existing_key, &updated_value)
+ .unwrap();
+ let retrieved_value: Option = db.get("NamespaceData", &non_existing_key).unwrap();
+ assert_eq!(retrieved_value, Some(updated_value.to_string()));
+
+ // Test delete with non-existing key
+ db.delete("NamespaceData", &non_existing_key).unwrap();
+ let retrieved_value: Option = db.get("NamespaceData", &non_existing_key).unwrap();
+ assert_eq!(retrieved_value, None);
+
+ // Test operations with non-existing column family
+ let non_existing_cf = "non_existing_cf";
+ let key = "key1".to_string();
+ let value = "value1";
+ let result = db.insert(non_existing_cf, &key, &value);
+ assert!(result.is_err());
+ let result: Result
, Error> {
- // Check if the table is in the given namespace
- let tables_in_namespace = self.list_all_tables(namespace)?;
- if let Some(tables) = tables_in_namespace {
- if !tables.contains(&table_name.to_string()) {
- return Err(Error::new(
- ErrorKind::NotFound,
- "Table not found in the given namespace",
- ));
- }
- }
+ namespace: &NamespaceIdent,
+ table_name: String,
+ ) -> Result
, Error> {
+ let table_id = TableIdent::new(namespace.clone(), table_name.clone());
let db = self.database.lock().unwrap();
// If the table is in the namespace, get the table data
- db.get::("TableData", table_name)
+ db.get::("TableData", &table_id)
}
- pub fn drop_table(&self, namespace: &str, table_name: &str) -> Result<(), Error> {
+ pub fn drop_table(&self, namespace: &NamespaceIdent, table_name: String) -> Result<(), Error> {
let db = self.database.lock().unwrap();
- db.delete("TableData", table_name)?;
+ let table_id = TableIdent::new(namespace.clone(), table_name.clone());
+
+ let _: Table = match db.get::("TableData", &table_id)? {
+ Some(data) => data,
+ None => {
+ return Err(std::io::Error::new(
+ ErrorKind::NotFound,
+ format!("Namespace {} not found", namespace.clone().0.join("\u{1F}")),
+ ))
+ }
+ };
+
+ db.delete("TableData", &table_id)?;
let mut tables = db
- .get::>("TableNamespaceMap", namespace)
+ .get::>("TableNamespaceMap", namespace)
.unwrap()
.unwrap();
- tables.retain(|name| name != table_name);
+ tables.retain(|id| id.name != table_name);
db.insert("TableNamespaceMap", namespace, &tables)
}
- // for the ?? route
- pub fn insert_table(&self, namespace: &str, table: &TableData) -> Result<(), Error> {
- self.create_table(namespace, table)
- }
-
- pub fn table_exists(&self, namespace: &str, table_name: &str) -> Result {
+ pub fn table_exists(
+ &self,
+ namespace: &NamespaceIdent,
+ table_name: String,
+ ) -> Result {
let table = self.load_table(namespace, table_name)?;
Ok(table.is_some())
}
pub fn rename_table(&self, rename_request: &TableRenameRequest) -> Result<(), Error> {
- let namespace = &rename_request.namespace;
- let old_name = &rename_request.old_name;
- let new_name = &rename_request.new_name;
+ let source = rename_request.source.clone();
+ let destination = rename_request.destination.clone();
+ let namespace = source.namespace.clone();
+
let table = self
- .load_table(namespace, old_name)?
- .ok_or_else(|| Error::new(ErrorKind::NotFound, "Table not found"))?;
+ .load_table(&namespace, source.name.clone())?
+ .ok_or_else(|| Error::new(ErrorKind::NotFound, "Source table not found"))?;
+
+ if self.table_exists(&destination.namespace, destination.name.clone())? {
+ return Err(Error::new(
+ ErrorKind::AlreadyExists,
+ "Destination table already exists",
+ ));
+ }
+
let mut new_table = table.clone();
- new_table.name = new_name.clone();
- self.drop_table(namespace, old_name)?;
- self.create_table(namespace, &new_table)
+ new_table.id = destination.clone();
+
+ self.create_table(
+ &destination.namespace.clone(),
+ &TableCreation {
+ name: destination.name.clone(),
+ },
+ )?;
+ self.drop_table(&namespace, source.name.clone())
}
}
-// todo: check commented tests
#[cfg(test)]
mod tests {
use super::*;
- use serde_json::json;
+ use crate::dto::table_data::TableCreation;
+ use crate::repository::namespace::NamespaceRepository;
use std::sync::{Arc, Mutex};
use tempfile::tempdir;
#[test]
- fn test_list_all_tables() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- assert_eq!(repo.list_all_tables("namespace").unwrap(), None);
- }
+ fn test_table_repository() {
+ let dir = tempdir().unwrap();
+ let db = Database::open(dir.path()).unwrap();
+ let db = Arc::new(Mutex::new(db));
+ let repo = TableRepository::new(db.clone());
- #[test]
- fn test_create_table() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
- };
- assert!(repo.create_table("namespace", &table).is_ok());
- }
+ // Create a namespace for testing
+ let namespace_ident = NamespaceIdent(vec!["test".to_string()]);
+ let namespace_repo = NamespaceRepository::new(db.clone());
+ namespace_repo
+ .create_namespace(namespace_ident.clone(), None)
+ .unwrap();
- #[test]
- fn test_load_table() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
+ // Test create_table
+ let table_creation = TableCreation {
+ name: "table1".to_string(),
};
- repo.create_table("namespace", &table).unwrap();
- assert!(repo.load_table("namespace", "table").unwrap().is_some());
- }
+ repo.create_table(&namespace_ident, &table_creation)
+ .unwrap();
- #[test]
- fn test_drop_table() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
- };
- repo.create_table("namespace", &table).unwrap();
- assert!(repo.drop_table("namespace", "table").is_ok());
- }
+ // Test table_exists
+ assert!(repo
+ .table_exists(&namespace_ident, "table1".to_string())
+ .unwrap());
- #[test]
- fn test_table_exists() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
- };
- repo.create_table("namespace", &table).unwrap();
- assert!(repo.table_exists("namespace", "table").unwrap());
- }
+ // Test load_table
+ let table = repo
+ .load_table(&namespace_ident, "table1".to_string())
+ .unwrap()
+ .unwrap();
+ assert_eq!(table.id.name, "table1");
- #[test]
- fn test_rename_table() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
- };
- repo.create_table("namespace", &table).unwrap();
+ // Test rename_table
let rename_request = TableRenameRequest {
- namespace: "namespace".to_string(),
- old_name: "table".to_string(),
- new_name: "new_table".to_string(),
+ source: TableIdent::new(namespace_ident.clone(), "table1".to_string()),
+ destination: TableIdent::new(namespace_ident.clone(), "table2".to_string()),
};
- assert!(repo.rename_table(&rename_request).is_ok());
- }
-
- #[test]
- fn test_load_table_not_found() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
+ repo.rename_table(&rename_request).unwrap();
+ assert!(!repo
+ .table_exists(&namespace_ident, "table1".to_string())
+ .unwrap());
assert!(repo
- .load_table("namespace", "nonexistent")
- .unwrap()
- .is_none());
- }
-
- #[test]
- fn test_table_exists_not_found() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- assert!(!repo.table_exists("namespace", "nonexistent").unwrap());
- }
+ .table_exists(&namespace_ident, "table2".to_string())
+ .unwrap());
- /*
- #[test]
- fn test_drop_table_not_found() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- assert!(repo.drop_table("namespace", "nonexistent").is_err());
+ // Test drop_table
+ repo.drop_table(&namespace_ident, "table2".to_string())
+ .unwrap();
+ assert!(!repo
+ .table_exists(&namespace_ident, "table2".to_string())
+ .unwrap());
}
- */
#[test]
- fn test_rename_table_not_found() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let rename_request = TableRenameRequest {
- namespace: "namespace".to_string(),
- old_name: "nonexistent".to_string(),
- new_name: "new_table".to_string(),
- };
- assert!(repo.rename_table(&rename_request).is_err());
- }
+ fn test_table_repository_negative() {
+ let dir = tempdir().unwrap();
+ let db = Database::open(dir.path()).unwrap();
+ let db = Arc::new(Mutex::new(db));
+ let repo = TableRepository::new(db.clone());
- /*
- #[test]
- fn test_create_table_empty_name() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
+ // Test with non-existent namespace
+ let non_existent_namespace = NamespaceIdent(vec!["non_existent".to_string()]);
+ let table_creation = TableCreation {
+ name: "table1".to_string(),
};
- assert!(repo.create_table("namespace", &table).is_err());
- }
+ assert!(repo
+ .create_table(&non_existent_namespace, &table_creation)
+ .is_err());
+ assert!(repo
+ .drop_table(&non_existent_namespace, "table1".to_string())
+ .is_err());
+ // Test with existing table
+ let namespace_ident = NamespaceIdent(vec!["test".to_string()]);
+ let namespace_repo = NamespaceRepository::new(db.clone());
+ namespace_repo
+ .create_namespace(namespace_ident.clone(), None)
+ .unwrap();
+ repo.create_table(&namespace_ident, &table_creation)
+ .unwrap();
+ assert!(repo
+ .create_table(&namespace_ident, &table_creation)
+ .is_err());
- #[test]
- fn test_create_table_already_exists() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
+ // Test rename_table with non-existent source table
+ let rename_request = TableRenameRequest {
+ source: TableIdent::new(namespace_ident.clone(), "non_existent".to_string()),
+ destination: TableIdent::new(namespace_ident.clone(), "table2".to_string()),
};
- repo.create_table("namespace", &table).unwrap();
- assert!(repo.create_table("namespace", &table).is_err());
- }
- */
-
- #[test]
- fn test_load_table_empty_name() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- assert!(repo.load_table("namespace", "").unwrap().is_none());
- }
- /*
- #[test]
- fn test_drop_table_empty_name() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- assert!(repo.drop_table("namespace", "").is_err());
- }
-
+ assert!(repo.rename_table(&rename_request).is_err());
- #[test]
- fn test_rename_table_empty_new_name() {
- let db = Arc::new(Mutex::new(
- Database::open(tempdir().unwrap().path()).unwrap(),
- ));
- let repo = TableRepository::new(db);
- let table = TableData {
- name: "table".to_string(),
- num_columns: 0,
- read_properties: json!({}),
- write_properties: json!({}),
- file_urls: vec![],
- columns: vec![],
- };
- repo.create_table("namespace", &table).unwrap();
+ // Test rename_table with existing destination table
let rename_request = TableRenameRequest {
- namespace: "namespace".to_string(),
- old_name: "table".to_string(),
- new_name: "".to_string(),
+ source: TableIdent::new(namespace_ident.clone(), "table1".to_string()),
+ destination: TableIdent::new(namespace_ident.clone(), "table1".to_string()),
};
assert!(repo.rename_table(&rename_request).is_err());
}
- */
}
diff --git a/src/routes/namespace.rs b/src/routes/namespace.rs
index 455fc22..a2e86bf 100644
--- a/src/routes/namespace.rs
+++ b/src/routes/namespace.rs
@@ -15,19 +15,19 @@ pub fn routes(db: Arc<Mutex<Database>>) -> Router {
.route("/namespaces", get(namespace_handler::list_namespaces))
.route("/namespaces", post(namespace_handler::create_namespace))
.route(
- "/namespace/:namespace",
+ "/namespaces/:namespace",
get(namespace_handler::load_namespace_metadata),
)
.route(
- "/namespace/:namespace",
+ "/namespaces/:namespace",
head(namespace_handler::namespace_exists),
)
.route(
- "/namespace/:namespace",
+ "/namespaces/:namespace",
delete(namespace_handler::drop_namespace),
)
.route(
- "/namespace/:namespace/properties",
+ "/namespaces/:namespace/properties",
post(namespace_handler::set_namespace_properties),
)
.with_state(repo);
diff --git a/src/routes/table.rs b/src/routes/table.rs
index 91da45a..a8a5c84 100644
--- a/src/routes/table.rs
+++ b/src/routes/table.rs
@@ -18,10 +18,6 @@ pub fn routes(db: Arc<Mutex<Database>>) -> Router {
"/namespaces/:namespace/tables",
post(table_handler::create_table),
)
- .route(
- "/namespaces/:namespace/register",
- post(table_handler::register_table),
- )
.route(
"/namespaces/:namespace/tables/:table",
get(table_handler::load_table),
@@ -35,10 +31,6 @@ pub fn routes(db: Arc<Mutex<Database>>) -> Router {
head(table_handler::table_exists),
)
.route("/tables/rename", post(table_handler::rename_table))
- .route(
- "/namespaces/:namespace/tables/:table/metrics",
- post(table_handler::report_metrics),
- )
.with_state(repo);
return router;
diff --git a/src/tests/mod.rs b/src/tests/mod.rs
deleted file mode 100644
index 8b13789..0000000
--- a/src/tests/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/tests/namespace_test.rs b/src/tests/namespace_test.rs
deleted file mode 100644
index 3e6e447..0000000
--- a/src/tests/namespace_test.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-use axum::{http::StatusCode, response::Json};
-use axum::extract::Json as JsonExtractor;
-use axum::handler::post;
-use axum::routing::Router;
-use serde_json::json;
-use axum::test::extract;
-
-use crate::{create_namespace, list_namespaces, Namespace};
-
-#[tokio::test]
-async fn test_list_namespaces() {
- // Create a test router with the list_namespaces route
- let app = Router::new().route("/namespaces", post(list_namespaces));
-
- // Perform a request to the route
- let response = axum::test::call(&app, axum::test::request::Request::post("/namespaces").body(()).unwrap()).await;
-
- // Ensure that the response status code is OK
- assert_eq!(response.status(), StatusCode::OK);
-
- // Ensure that the response body contains the expected JSON data
- let body = extract::<Json<Vec<String>>>(response.into_body()).await.unwrap();
- assert_eq!(body.0, vec!["accounting", "tax", "paid"]);
-}
-
-#[tokio::test]
-async fn test_create_namespace() {
- // Create a test router with the create_namespace route
- let app = Router::new().route("/namespaces", post(create_namespace));
-
- // Create a JSON payload representing a new namespace
- let payload = json!({});
-
- // Perform a request to the route with the JSON payload
- let response = axum::test::call(&app, axum::test::request::Request::post("/namespaces").body(payload.to_string()).unwrap()).await;
-
- // Ensure that the response status code is OK
- assert_eq!(response.status(), StatusCode::OK);
-
- // Ensure that the response body contains the expected JSON data
- let body = extract::<Json<Namespace>>(response.into_body()).await.unwrap();
- assert_eq!(body, Json(Namespace {}));
-}