From 60791d0ebe67aaacec1c87619e24d381842eca72 Mon Sep 17 00:00:00 2001
From: Sean Chao
Date: Sun, 19 Sep 2021 13:35:11 +0000
Subject: [PATCH 1/3] feat: metrics keys use policy names

refactor: remove trivial Task enum, add a CacheSizeType
---
 .gitignore     |   2 +
 Makefile       |   7 ++-
 docs/README.md |   4 +-
 redis.conf     |   1 -
 src/cache.rs   |  97 ++++++++++++++++++++++++++--------------
 src/main.rs    |   2 +-
 src/task.rs    | 118 +++++++++++++++++++------------------------------
 7 files changed, 121 insertions(+), 110 deletions(-)
 delete mode 100644 redis.conf

diff --git a/.gitignore b/.gitignore
index 073664d..2ca7145 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
 /target
+redis.conf
+
 .vscode
diff --git a/Makefile b/Makefile
index 2e61c83..fd4430b 100644
--- a/Makefile
+++ b/Makefile
@@ -26,13 +26,16 @@ dev_deps:
 	cargo install cargo-watch
 	yarn global add zx
 
-redis:
+redis_conf:
+	echo "notify-keyspace-events Kx" > redis.conf
+
+redis: redis_conf
 	docker run $(REDIS_OPTS) --name redis_dev -d --network host --rm redis /conf/redis.conf
 
 redis_stop:
 	docker stop redis_dev
 
-redis_test:
+redis_test: redis_conf
 	docker stop redis_test || return 0
 	docker run $(REDIS_OPTS) --name redis_test -d -p 3001:6379 --rm redis /conf/redis.conf
 
diff --git a/docs/README.md b/docs/README.md
index a6bf2f6..58272ae 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -32,6 +32,8 @@ The type of `size` in the config file is string. E.g: `1000` (B), `42 KB`, `2.33GB`.
 
 `log_level` specifies the log level. Allowed values are `trace`, `debug`, `info`, `warn`, `error`.
 
+`hot_reload` specifies whether to enable configuration hot reloading. Defaults to `false`.
+
 #### Redis
 
 `url` is the Redis connection string.
@@ -55,7 +57,7 @@ Rules are an array of customized proxy rules.
 
 Policies are an array of customized cache policies.
 
-- `name`: the unique name of the policy
+- `name`: the **unique** name of the policy. Used in database keyspaces and metrics to identify the policy in a user-friendly way.
 - `type`: the type of the policy, see [Cache Policies](#cache-policies) for details
 - `path`: the path of cached data
 - `metadata_db`: the metadata database to use: `redis` or `sled`. See [Cache Policies](#cache-policies) for details
diff --git a/redis.conf b/redis.conf
deleted file mode 100644
index 42128f9..0000000
--- a/redis.conf
+++ /dev/null
@@ -1 +0,0 @@
-notify-keyspace-events Kx
diff --git a/src/cache.rs b/src/cache.rs
index 485cf1e..98aa2e3 100644
--- a/src/cache.rs
+++ b/src/cache.rs
@@ -24,6 +24,10 @@ use std::sync::Arc;
 use std::thread::JoinHandle;
 use std::vec::Vec;
 
+/// Datatype of cache size.
+/// Note: it is persisted in some databases, so changing it may not be backward compatible.
+type CacheSizeType = u64;
+
 pub enum CacheHitMiss {
     Hit,
     Miss,
@@ -110,8 +114,14 @@ pub trait LruMetadataStore: Sync + Send {
     fn get_lru_entry(&self, key: &str) -> CacheHitMiss;
     fn set_lru_entry(&self, key: &str, value: &CacheData);
     /// Run eviction policy if needed, reserve at least `size` for new cache entry.
- fn evict(&self, new_size: u64, new_key: &str, size_limit: u64, storage: &Storage); - fn get_total_size(&self) -> u64; + fn evict( + &self, + new_size: CacheSizeType, + new_key: &str, + size_limit: CacheSizeType, + storage: &Storage, + ); + fn get_total_size(&self) -> CacheSizeType; } /// `TtlMetadataStore` defines required behavior for a TTL cache @@ -127,20 +137,20 @@ pub trait TtlMetadataStore: Sync + Send { /// Wrapper of an LRU cache object pub struct LruCache { - pub size_limit: u64, + pub size_limit: CacheSizeType, metadata_db: Arc, storage: Storage, } impl LruCache { pub fn new( - size_limit: u64, + size_limit: CacheSizeType, metadata_db: Arc, storage: Storage, - metric_id: String, + metric_id: &str, ) -> Self { register_histogram!( - metric::get_cache_size_metrics_key(&metric_id), + metric::get_cache_size_metrics_key(metric_id), metrics::Unit::Bytes, ); Self { @@ -154,7 +164,7 @@ impl LruCache { #[async_trait] impl Cache for LruCache { async fn put(&self, key: &str, mut entry: CacheData) { - let file_size = entry.len() as u64; + let file_size = entry.len() as CacheSizeType; if file_size > self.size_limit { info!( @@ -247,8 +257,11 @@ pub struct RedisMetadataDb { } impl RedisMetadataDb { - pub fn new(redis_client: redis::Client, id: String) -> Self { - Self { redis_client, id } + pub fn new(redis_client: redis::Client, id: &str) -> Self { + Self { + redis_client, + id: id.into(), + } } pub fn from_prefixed_key(&self, cache_key: &str) -> String { @@ -312,7 +325,7 @@ impl LruMetadataStore for RedisMetadataDb { fn set_lru_entry(&self, key: &str, value: &CacheData) { let redis_key = &self.to_prefixed_key(key); let mut con = models::get_sync_con(&self.redis_client).unwrap(); - let entry = &CacheEntry::new(redis_key, value.len() as u64); + let entry = &CacheEntry::new(redis_key, value.len() as CacheSizeType); let _redis_resp_str = models::set_lru_cache_entry( &mut con, redis_key, @@ -323,7 +336,13 @@ impl LruMetadataStore for RedisMetadataDb { trace!("CACHE SET {} -> {:?}", &redis_key, value); } - fn evict(&self, new_size: u64, new_key: &str, size_limit: u64, storage: &Storage) { + fn evict( + &self, + new_size: CacheSizeType, + new_key: &str, + size_limit: CacheSizeType, + storage: &Storage, + ) { let redis_key = &self.to_prefixed_key(new_key); // eviction policy let file_size = new_size; @@ -338,13 +357,13 @@ impl LruMetadataStore for RedisMetadataDb { // LRU eviction trace!( "current {} + new {} > limit {}", - con.get::<&str, Option>(&self.total_size_key()) + con.get::<&str, Option>(&self.total_size_key()) .unwrap() .unwrap_or(0), file_size, size_limit ); - let pkg_to_remove: Vec<(String, u64)> = + let pkg_to_remove: Vec<(String, CacheSizeType)> = con.zpopmin(&self.entries_zlist_key(), 1).unwrap(); trace!("pkg_to_remove: {:?}", pkg_to_remove); if pkg_to_remove.is_empty() { @@ -366,10 +385,13 @@ impl LruMetadataStore for RedisMetadataDb { warn!("failed to remove file: {:?}", e); } }; - let pkg_size: Option = con.hget(&f, "size").unwrap(); + let pkg_size: Option = con.hget(&f, "size").unwrap(); let _del_cnt = con.del::<&str, isize>(&f); cur_cache_size = con - .decr::<&str, u64, u64>(&self.total_size_key(), pkg_size.unwrap_or(0)) + .decr::<&str, CacheSizeType, CacheSizeType>( + &self.total_size_key(), + pkg_size.unwrap_or(0), + ) .unwrap(); trace!("total_size -= {:?} -> {}", pkg_size, cur_cache_size); } @@ -379,10 +401,13 @@ impl LruMetadataStore for RedisMetadataDb { ); } - fn get_total_size(&self) -> u64 { + fn get_total_size(&self) -> CacheSizeType { let key = self.total_size_key(); 
let mut con = self.redis_client.get_connection().unwrap(); - let size = con.get::<&str, Option>(&key).unwrap().unwrap_or(0); + let size = con + .get::<&str, Option>(&key) + .unwrap() + .unwrap_or(0); histogram!(metric::get_cache_size_metrics_key(&self.id), size as f64); size } @@ -636,13 +661,13 @@ impl LruMetadataStore for SledMetadataDb { metadata_tree, atime_tree, key, - value.len() as u64, + value.len() as CacheSizeType, atime, ); let current_size = models::sled_lru_get_current_size(db, &self.cf) .unwrap() .unwrap() - + value.len() as u64; + + value.len() as CacheSizeType; models::sled_lru_set_current_size(db, &self.cf, current_size); histogram!( metric::get_cache_size_metrics_key(&self.cf), @@ -660,7 +685,13 @@ impl LruMetadataStore for SledMetadataDb { } /// Run eviction policy if needed, reserve at least `size` for new cache entry. - fn evict(&self, evict_size: u64, _new_key: &str, size_limit: u64, storage: &Storage) { + fn evict( + &self, + evict_size: CacheSizeType, + _new_key: &str, + size_limit: CacheSizeType, + storage: &Storage, + ) { let db = &self.db; let prefix = &self.cf; let default_tree: &sled::Tree = db; @@ -719,7 +750,7 @@ impl LruMetadataStore for SledMetadataDb { } } - fn get_total_size(&self) -> u64 { + fn get_total_size(&self) -> CacheSizeType { self.db .transaction::<_, _, ()>(|tx_db| { Ok(models::sled_lru_get_current_size(tx_db, &self.cf) @@ -814,7 +845,7 @@ pub struct CacheEntry { #[derive(Debug)] pub struct LruCacheMetadata { - pub size: u64, + pub size: CacheSizeType, pub atime: i64, // last access timestamp } @@ -884,7 +915,7 @@ mod tests { } impl LruCache { - fn get_total_size(&self) -> u64 { + fn get_total_size(&self) -> CacheSizeType { self.metadata_db.get_total_size() } } @@ -933,11 +964,11 @@ mod tests { ($dir: expr, $size: expr, $redis_client: expr, $id: expr) => { LruCache::new( $size, - Arc::new(RedisMetadataDb::new($redis_client, $id.to_string())), + Arc::new(RedisMetadataDb::new($redis_client, $id)), Storage::FileSystem { root_dir: $dir.to_string(), }, - $id.to_string(), + $id, ) }; } @@ -950,7 +981,7 @@ mod tests { Storage::FileSystem { root_dir: $dir.to_string(), }, - $id.to_string(), + $id, ) }; } @@ -959,7 +990,7 @@ mod tests { ($dir: expr, $ttl: expr, $redis_client:expr, $id: expr) => { TtlCache::new( $ttl, - Arc::new(RedisMetadataDb::new($redis_client, $id.to_string())), + Arc::new(RedisMetadataDb::new($redis_client, $id)), Storage::FileSystem { root_dir: $dir.to_string(), }, @@ -1011,8 +1042,8 @@ mod tests { let cached_data = vec![42]; let len = cached_data.len(); cache_put!(lru_cache, "answer", cached_data.clone().into()); - let total_size_expected = len as u64; - let total_size_actual: u64 = lru_cache.get_total_size(); + let total_size_expected = len as CacheSizeType; + let total_size_actual: CacheSizeType = lru_cache.get_total_size(); let cached_data_actual = get_file_all(&format!("{}/{}", TEST_CACHE_DIR, key)); // metadata: size is 1, file content is the same assert_eq!(total_size_actual, total_size_expected); @@ -1028,8 +1059,8 @@ mod tests { let cached_data = vec![42]; let len = cached_data.len(); cache_put!(lru_cache, "answer", cached_data.clone().into()); - let total_size_expected = len as u64; - let total_size_actual: u64 = lru_cache.get_total_size(); + let total_size_expected = len as CacheSizeType; + let total_size_actual: CacheSizeType = lru_cache.get_total_size(); let cached_data_actual = get_file_all(&format!("{}/{}", TEST_CACHE_DIR, key)); // metadata: size is 1, file content is the same assert_eq!(total_size_actual, 
total_size_expected); @@ -1038,11 +1069,11 @@ mod tests { async fn lru_cache_size_constaint_tester(lru_cache: LruCache, cached_path: &str) { cache_put!(lru_cache, "tsu_ki", vec![0; 5].into()); - let total_size_actual: u64 = lru_cache.get_total_size(); + let total_size_actual: CacheSizeType = lru_cache.get_total_size(); assert_eq!(total_size_actual, 5); thread::sleep(time::Duration::from_secs(1)); cache_put!(lru_cache, "kirei", vec![0; 11].into()); - let total_size_actual: u64 = lru_cache.get_total_size(); + let total_size_actual: CacheSizeType = lru_cache.get_total_size(); assert_eq!(total_size_actual, 16); assert_eq!( get_file_all(&format!("{}/{}", cached_path, "tsu_ki")), diff --git a/src/main.rs b/src/main.rs index 037ac10..dfab3fa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -221,7 +221,7 @@ mod handlers { trace!("matched by rule #{}: {}", idx, &rule.path); increment_counter!(metric::COUNTER_REQ, "rule" => rule_label(rule)); let replaced = re.replace_all(&path, &upstream); - let task = Task::Others { + let task = Task { rule_id: idx, url: String::from(replaced), }; diff --git a/src/task.rs b/src/task.rs index 7ccbb90..f67a49c 100644 --- a/src/task.rs +++ b/src/task.rs @@ -21,8 +21,9 @@ use tokio::sync::RwLock; use warp::http::Response; #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Task { - Others { rule_id: RuleId, url: String }, +pub struct Task { + pub rule_id: RuleId, + pub url: String, } pub enum TaskResponse { @@ -67,13 +68,11 @@ impl warp::Reply for TaskResponse { impl Task { /// create a unique key for the current task pub fn to_key(&self) -> String { - match &self { - Task::Others { url, .. } => url - .replace("http://", "http/") - .replace("https://", "https/") - .trim_end_matches('/') - .to_string(), - } + self.url + .replace("http://", "http/") + .replace("https://", "https/") + .trim_end_matches('/') + .to_string() } } @@ -113,13 +112,10 @@ impl TaskManager { // try get from cache let mut cache_result = None; let key = task.to_key(); - match &task { - Task::Others { .. } => { - if let Some(bytes) = self.get(task, &key).await { - cache_result = Some(bytes); - } - } - }; + + if let Some(bytes) = self.get(task, &key).await { + cache_result = Some(bytes); + } if let Some(data) = cache_result { info!("[Request] [HIT] {:?}", &task); return (Ok(data.into()), CacheHitMiss::Hit); @@ -154,22 +150,19 @@ impl TaskManager { } // dispatch async cache task let _ = self.spawn_task(task.clone()).await; - match &task { - Task::Others { rule_id, .. 
} => { - if let Some(rewrite_rules) = self.rewrite_map.get(rule_id) { - let text = res.text().await.unwrap(); - let content = Self::rewrite_upstream(text, rewrite_rules); - (Ok(content.into()), CacheHitMiss::Miss) - } else { - ( - Ok(TaskResponse::StreamResponse(Box::pin( - res.bytes_stream() - .map(move |x| x.map_err(Error::RequestError)), - ))), - CacheHitMiss::Miss, - ) - } - } + let rule_id = task.rule_id; + if let Some(rewrite_rules) = self.rewrite_map.get(&rule_id) { + let text = res.text().await.unwrap(); + let content = Self::rewrite_upstream(text, rewrite_rules); + (Ok(content.into()), CacheHitMiss::Miss) + } else { + ( + Ok(TaskResponse::StreamResponse(Box::pin( + res.bytes_stream() + .map(move |x| x.map_err(Error::RequestError)), + ))), + CacheHitMiss::Miss, + ) } } Err(e) => { @@ -235,7 +228,7 @@ impl TaskManager { sled_metadata_path: &str, ) -> Result> { let policy_ident = policy_name; - for (idx, p) in policies.iter().enumerate() { + for p in policies { if p.name == policy_ident { let policy_type = p.typ; let metadata_db = p.metadata_db; @@ -243,49 +236,41 @@ impl TaskManager { (PolicyType::Lru, MetadataDb::Redis) => { return Ok(Arc::new(LruCache::new( p.size.as_ref().map_or(0, |x| bytefmt::parse(x).unwrap()), - Arc::new(RedisMetadataDb::new( - redis_client.unwrap(), - format!("lru_rule_{}", idx), - )), + Arc::new(RedisMetadataDb::new(redis_client.unwrap(), policy_ident)), Storage::FileSystem { root_dir: p.path.clone().unwrap(), }, - format!("lru_rule_{}", idx), + policy_ident, ))); } (PolicyType::Lru, MetadataDb::Sled) => { - let id = format!("lru_rule_{}", idx); return Ok(Arc::new(LruCache::new( p.size.as_ref().map_or(0, |x| bytefmt::parse(x).unwrap()), Arc::new(SledMetadataDb::new_lru( - &format!("{}/{}", sled_metadata_path, &id), - &id, + &format!("{}/{}", sled_metadata_path, policy_ident), + policy_ident, )), Storage::FileSystem { root_dir: p.path.clone().unwrap(), }, - format!("lru_rule_{}", idx), + policy_ident, ))); } (PolicyType::Ttl, MetadataDb::Redis) => { return Ok(Arc::new(TtlCache::new( p.timeout.unwrap_or(0), - Arc::new(RedisMetadataDb::new( - redis_client.unwrap(), - format!("ttl_rule_{}", idx), - )), + Arc::new(RedisMetadataDb::new(redis_client.unwrap(), policy_ident)), Storage::FileSystem { root_dir: p.path.clone().unwrap(), }, ))); } (PolicyType::Ttl, MetadataDb::Sled) => { - let id = format!("ttl_rule_{}", policy_ident); return Ok(Arc::new(TtlCache::new( p.timeout.unwrap_or(0), Arc::new(SledMetadataDb::new_ttl( - &format!("{}/{}", sled_metadata_path, &id), - &id, + &format!("{}/{}", sled_metadata_path, &policy_ident), + policy_ident, p.clean_interval.unwrap(), )), Storage::FileSystem { @@ -330,14 +315,8 @@ impl TaskManager { self.taskset_add(task.clone()).await; let task_set_len = Self::taskset_len(self.task_set.clone()).await; info!("[TASK] [len={}] + {:?}", task_set_len, task); - let c; - let rewrites; - match &task { - Task::Others { rule_id, .. } => { - c = self.get_cache_for_cache_rule(*rule_id).unwrap(); - rewrites = self.rewrite_map.get(rule_id).cloned() - } - }; + let c = self.get_cache_for_cache_rule(task.rule_id).unwrap(); + let rewrites = self.rewrite_map.get(&task.rule_id).cloned(); let task_clone = task.clone(); let upstream_url = self.resolve_task_upstream(&task_clone); let task_list_ptr = self.task_set.clone(); @@ -394,15 +373,14 @@ impl TaskManager { } /// get task result from cache - pub async fn get(&self, task_type: &Task, key: &str) -> Option { - match &task_type { - Task::Others { rule_id, .. 
} => match self.get_cache_for_cache_rule(*rule_id) { - Some(cache) => cache.get(key).await, - None => { - error!("Failed to get cache for rule #{} from cache map", rule_id); - None - } - }, + pub async fn get(&self, task: &Task, key: &str) -> Option { + let rule_id = task.rule_id; + match self.get_cache_for_cache_rule(rule_id) { + Some(cache) => cache.get(key).await, + None => { + error!("Failed to get cache for rule #{} from cache map", rule_id); + None + } } } @@ -415,9 +393,7 @@ impl TaskManager { } pub fn resolve_task_upstream(&self, task_type: &Task) -> String { - match &task_type { - Task::Others { url, .. } => url.clone(), - } + task_type.url.clone() } pub fn get_cache_for_cache_rule(&self, rule_id: RuleId) -> Option> { @@ -425,9 +401,7 @@ impl TaskManager { } pub fn get_task_size_limit(&self, task: &Task) -> usize { - match task { - Task::Others { rule_id, .. } => self.rule_map.get(rule_id).unwrap().1, - } + self.rule_map.get(&task.rule_id).unwrap().1 } } From c2a9c2aaaf56d8f938729c616572efbe9440ffd7 Mon Sep 17 00:00:00 2001 From: Sean Chao Date: Sun, 19 Sep 2021 13:45:18 +0000 Subject: [PATCH 2/3] chore(ci): fix and simplify test --- Makefile | 4 ++-- scripts/stress.mjs | 29 ----------------------------- 2 files changed, 2 insertions(+), 31 deletions(-) delete mode 100644 scripts/stress.mjs diff --git a/Makefile b/Makefile index fd4430b..0d9da7d 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ build: run: cargo run -test: +test: redis_conf docker stop redis_test || return 0 docker run $(REDIS_OPTS) --name redis_test -d -p 3001:6379 --rm redis /conf/redis.conf rm -r cache || return 0 @@ -27,6 +27,7 @@ dev_deps: yarn global add zx redis_conf: + rm -rf redis.conf || return 0 echo "notify-keyspace-events Kx" > redis.conf redis: redis_conf @@ -62,7 +63,6 @@ scenario_test: zx ./scripts/pip_test.mjs zx ./scripts/conda_test.mjs zx ./scripts/conda_cloud_test.mjs - zx ./scripts/stress.mjs metrics: docker run -d --rm --network host -v $$PWD/prom.yml:/srv/prom.yml:ro --name metrics prom/prometheus --config.file /srv/prom.yml diff --git a/scripts/stress.mjs b/scripts/stress.mjs deleted file mode 100644 index a77bc73..0000000 --- a/scripts/stress.mjs +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/zx -$.quote = v => v - -import { pip_install } from './lib.mjs' - -const config = { - 'mirror': 'http://localhost:9000/pypi/simple', - ...argv -} - -let exitCode = 0; -try { - // begin tests - const base = ["torch", "tensorflow"] - let targets = [] - for (let i = 0; i < 3; i++) { - targets = targets.concat(base) - } - console.log(targets) - await Promise.all(targets.map(e => $`${pip_install(e, config.mirror)}`)); - // end tests -} catch (p) { - console.log(p); - exitCode = p.exitCode; -} finally { - console.log('Tests finished, cleaning up...') -} - -await $`exit ${exitCode}` From 830dbcf4ddefe622f1e107466a8d57d5d37bb351 Mon Sep 17 00:00:00 2001 From: Sean Chao Date: Sun, 19 Sep 2021 14:34:43 +0000 Subject: [PATCH 3/3] chore(ci): add dockerhub and crates.io release actions --- .github/workflows/release.yml | 50 +++++++++++++++++++++++++++++++---- Cargo.lock | 2 +- Cargo.toml | 2 +- Dockerfile | 4 +-- src/main.rs | 3 ++- 5 files changed, 51 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 94829ee..c89204c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -69,16 +69,15 @@ jobs: kill %1 make redis_stop - release: + github-release: name: GitHub Releases runs-on: ubuntu-latest - needs: - - style - - test + needs: 
[style, test] steps: - name: Checkout uses: actions/checkout@v1 - - uses: actions-rs/cargo@v1 + - name: Cargo Build + uses: actions-rs/cargo@v1 with: command: build args: --release @@ -87,3 +86,44 @@ jobs: allowUpdates: true artifacts: "target/release/mirror-cache" token: ${{ secrets.GITHUB_TOKEN }} + + docker-release: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + needs: [style, test] + steps: + - name: Check out the repo + uses: actions/checkout@v2 + - name: Log in to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v3 + with: + images: seanchao/mirror-cache + + - name: Build and push Docker image + uses: docker/build-push-action@v2 + with: + context: . + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + crates-release: + name: Publish on crates.io + runs-on: ubuntu-latest + needs: [style, test] + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + - uses: katyo/publish-crates@v1 + with: + registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 234c0b5..6c6069f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1046,7 +1046,7 @@ dependencies = [ [[package]] name = "mirror-cache" -version = "0.0.1" +version = "0.0.3" dependencies = [ "async-trait", "bytefmt", diff --git a/Cargo.toml b/Cargo.toml index 48d036f..83e345e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mirror-cache" -version = "0.0.1" +version = "0.0.3" authors = ["SeanChao "] edition = "2018" diff --git a/Dockerfile b/Dockerfile index 8713287..992fb0b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ RUN apt-get install -y openssl ca-certificates RUN update-ca-certificates # Copy from the previous build -COPY --from=build /mirror-cache/target/release/mirror-cache /app/mirror-cache +COPY --from=build /mirror-cache/target/release/mirror-cache /bin/mirror-cache WORKDIR /app/ # Run the binary -CMD ["/app/mirror-cache"] +CMD ["mirror-cache"] diff --git a/src/main.rs b/src/main.rs index dfab3fa..9836b44 100644 --- a/src/main.rs +++ b/src/main.rs @@ -8,7 +8,7 @@ mod task; mod util; use cache::CacheHitMiss; -use clap::{App, Arg}; +use clap::{crate_version, App, Arg}; use metrics::{increment_counter, register_counter}; use metrics_exporter_prometheus::PrometheusBuilder; use metrics_util::MetricKindMask; @@ -47,6 +47,7 @@ lazy_static::lazy_static! { #[tokio::main] async fn main() { let matches = App::new("mirror-cache") + .version(crate_version!()) .arg( Arg::with_name("config") .short("c")
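
Reviewer note (not part of the patch series): after PATCH 1, a cache key is derived purely from the task URL by `Task::to_key`. A minimal standalone sketch of that mapping, with assert values derived from the replace/trim logic in the diff (illustrative, not taken from an existing test):

// Sketch: reproduces Task::to_key's URL-to-cache-key mapping in isolation.
fn to_key(url: &str) -> String {
    url.replace("http://", "http/")
        .replace("https://", "https/")
        .trim_end_matches('/')
        .to_string()
}

fn main() {
    // The scheme separator collapses into a path segment and trailing
    // slashes are dropped, so equivalent URLs share one cache entry.
    assert_eq!(to_key("https://example.com/pypi/simple/"), "https/example.com/pypi/simple");
    assert_eq!(to_key("http://localhost:9000/conda"), "http/localhost:9000/conda");
}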
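A second sketch, of the LRU eviction invariant that both `evict` implementations in PATCH 1 enforce: pop least-recently-used entries until `total + new_size <= size_limit`. The `BTreeMap` below is an in-memory stand-in for the Redis sorted set / sled atime tree, and all names here are illustrative, not the crate's actual API:

use std::collections::BTreeMap;

type CacheSizeType = u64;

// Pop least-recently-accessed entries until `new_size` fits under `size_limit`.
fn evict(
    entries: &mut BTreeMap<i64, (String, CacheSizeType)>, // atime -> (key, size)
    total_size: &mut CacheSizeType,
    new_size: CacheSizeType,
    size_limit: CacheSizeType,
) {
    while *total_size + new_size > size_limit {
        let oldest = match entries.keys().next() {
            Some(&atime) => atime,
            None => break, // nothing left to evict
        };
        let (key, size) = entries.remove(&oldest).unwrap();
        *total_size -= size;
        println!("evicted {} ({} bytes), total now {}", key, size, total_size);
    }
}

fn main() {
    // Mirrors the sizes used in lru_cache_size_constaint_tester above.
    let mut entries = BTreeMap::new();
    entries.insert(1, ("tsu_ki".to_string(), 5));
    entries.insert(2, ("kirei".to_string(), 11));
    let mut total: CacheSizeType = 16;
    evict(&mut entries, &mut total, 5, 16); // the oldest entry must go
    assert_eq!(total, 11);
    assert!(!entries.contains_key(&1) && entries.contains_key(&2));
}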