From ec17b4ff77117c923c4f3c0f4a9b94e35684871e Mon Sep 17 00:00:00 2001 From: Igor Pashev Date: Mon, 26 Oct 2020 18:42:47 +0200 Subject: [PATCH] Implement new automatic snapshot feature Signed-off-by: Igor Pashev --- Cargo.lock | 1 + iml-api/src/graphql/mod.rs | 472 +----------- iml-api/src/graphql/snapshot.rs | 397 ++++++++++ iml-api/src/main.rs | 1 - iml-api/src/timer.rs | 105 --- iml-graphql-queries/src/snapshot.rs | 412 +++++----- iml-gui/crate/src/lib.rs | 7 +- .../crate/src/page/snapshot/add_interval.rs | 316 -------- .../crate/src/page/snapshot/create_policy.rs | 471 ++++++++++++ .../src/page/snapshot/create_retention.rs | 330 -------- .../crate/src/page/snapshot/list_interval.rs | 211 ------ .../{list_retention.rs => list_policy.rs} | 64 +- iml-gui/crate/src/page/snapshot/mod.rs | 139 +--- iml-gui/crate/src/page/snapshot/take.rs | 2 +- iml-gui/crate/src/test_utils/fixture.json | 3 +- iml-manager-cli/src/display_utils.rs | 61 +- iml-manager-cli/src/snapshot.rs | 184 ++--- iml-services/iml-snapshot/Cargo.toml | 5 +- .../iml-snapshot/src/client_monitor.rs | 2 +- iml-services/iml-snapshot/src/lib.rs | 2 +- iml-services/iml-snapshot/src/main.rs | 22 +- iml-services/iml-snapshot/src/policy.rs | 709 ++++++++++++++++++ iml-services/iml-snapshot/src/retention.rs | 213 ------ iml-warp-drive/src/cache.rs | 45 +- iml-warp-drive/src/db_record.rs | 15 +- iml-wire-types/src/graphql_duration.rs | 2 +- iml-wire-types/src/snapshot.rs | 94 +-- iml-wire-types/src/warp_drive.rs | 60 +- migrations/20201214000000_snapshot_v2.sql | 71 ++ sqlx-data.json | 517 ++++--------- 30 files changed, 2248 insertions(+), 2685 deletions(-) create mode 100644 iml-api/src/graphql/snapshot.rs delete mode 100644 iml-api/src/timer.rs delete mode 100644 iml-gui/crate/src/page/snapshot/add_interval.rs create mode 100644 iml-gui/crate/src/page/snapshot/create_policy.rs delete mode 100644 iml-gui/crate/src/page/snapshot/create_retention.rs delete mode 100644 iml-gui/crate/src/page/snapshot/list_interval.rs rename iml-gui/crate/src/page/snapshot/{list_retention.rs => list_policy.rs} (65%) create mode 100644 iml-services/iml-snapshot/src/policy.rs delete mode 100644 iml-services/iml-snapshot/src/retention.rs create mode 100644 migrations/20201214000000_snapshot_v2.sql diff --git a/Cargo.lock b/Cargo.lock index de9f5b5277..1d31c9cc8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2089,6 +2089,7 @@ dependencies = [ name = "iml-snapshot" version = "0.4.0" dependencies = [ + "chrono", "futures", "futures-util", "iml-command-utils", diff --git a/iml-api/src/graphql/mod.rs b/iml-api/src/graphql/mod.rs index b2dfe3959a..ec0ad1196d 100644 --- a/iml-api/src/graphql/mod.rs +++ b/iml-api/src/graphql/mod.rs @@ -3,29 +3,22 @@ // license that can be found in the LICENSE file. 
mod filesystem; +mod snapshot; mod stratagem; mod task; -use crate::{ - command::get_command, - error::ImlApiError, - timer::{configure_snapshot_timer, remove_snapshot_timer}, -}; +use crate::error::ImlApiError; use chrono::{DateTime, Utc}; use futures::{ future::{self, join_all}, TryFutureExt, TryStreamExt, }; -use iml_postgres::{ - active_mgs_host_fqdn, fqdn_by_host_id, sqlx, sqlx::postgres::types::PgInterval, PgPool, -}; +use iml_postgres::{fqdn_by_host_id, sqlx, PgPool}; use iml_rabbit::{ImlRabbitError, Pool}; use iml_wire_types::{ db::{LogMessageRecord, LustreFid, ServerProfileRecord, TargetRecord}, graphql::{ServerProfile, ServerProfileInput}, - graphql_duration::GraphQLDuration, logs::{LogResponse, Meta}, - snapshot::{ReserveUnit, Snapshot, SnapshotInterval, SnapshotRetention}, task::Task, Command, EndpointName, FsType, Job, LogMessage, LogSeverity, MessageClass, SortDir, }; @@ -36,7 +29,7 @@ use juniper::{ }; use std::{ collections::{HashMap, HashSet}, - convert::{Infallible, TryFrom as _, TryInto}, + convert::{Infallible, TryInto}, ops::Deref, str::FromStr, sync::Arc, @@ -126,6 +119,9 @@ impl QueryRoot { fn task(&self) -> task::TaskQuery { task::TaskQuery } + fn snapshot(&self) -> snapshot::SnapshotQuery { + snapshot::SnapshotQuery + } /// Given a host id, try to find the matching corosync node name #[graphql(arguments(host_id(description = "The id to search on")))] async fn corosync_node_name_by_host( @@ -295,47 +291,6 @@ impl QueryRoot { Ok(xs) } - #[graphql(arguments( - limit(description = "optional paging limit, defaults to all rows"), - offset(description = "Offset into items, defaults to 0"), - dir(description = "Sort direction, defaults to ASC"), - fsname(description = "Filesystem the snapshot was taken from"), - name(description = "Name of the snapshot"), - ))] - /// Fetch the list of snapshots - async fn snapshots( - context: &Context, - limit: Option, - offset: Option, - dir: Option, - fsname: String, - name: Option, - ) -> juniper::FieldResult> { - let dir = dir.unwrap_or_default(); - - let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; - - let snapshots = sqlx::query_as!( - Snapshot, - r#" - SELECT filesystem_name, snapshot_name, create_time, modify_time, snapshot_fsname, mounted, comment FROM snapshot s - WHERE filesystem_name = $4 AND ($5::text IS NULL OR snapshot_name = $5) - ORDER BY - CASE WHEN $3 = 'ASC' THEN s.create_time END ASC, - CASE WHEN $3 = 'DESC' THEN s.create_time END DESC - OFFSET $1 LIMIT $2"#, - offset.unwrap_or(0) as i64, - limit.map(|x| x as i64), - dir.deref(), - fsname, - name, - ) - .fetch_all(&context.pg_pool) - .await?; - - Ok(snapshots) - } - /// Fetch the list of commands #[graphql(arguments( limit(description = "optional paging limit, defaults to all rows"), @@ -442,47 +397,6 @@ impl QueryRoot { Ok(commands) } - /// List all snapshot intervals - async fn snapshot_intervals(context: &Context) -> juniper::FieldResult> { - let xs: Vec = sqlx::query!("SELECT * FROM snapshot_interval") - .fetch(&context.pg_pool) - .map_ok(|x| SnapshotInterval { - id: x.id, - filesystem_name: x.filesystem_name, - use_barrier: x.use_barrier, - interval: x.interval.into(), - last_run: x.last_run, - }) - .try_collect() - .await?; - - Ok(xs) - } - /// List all snapshot retention policies. Snapshots will automatically be deleted (starting with the oldest) - /// when free space falls below the defined reserve value and its associated unit. 
- async fn snapshot_retention_policies( - context: &Context, - ) -> juniper::FieldResult> { - let xs: Vec = sqlx::query_as!( - SnapshotRetention, - r#" - SELECT - id, - filesystem_name, - reserve_value, - reserve_unit as "reserve_unit:ReserveUnit", - last_run, - keep_num - FROM snapshot_retention - "# - ) - .fetch(&context.pg_pool) - .try_collect() - .await?; - - Ok(xs) - } - #[graphql(arguments( limit(description = "optional paging limit, defaults to 100",), offset(description = "Offset into items, defaults to 0"), @@ -644,28 +558,6 @@ impl QueryRoot { } } -struct SnapshotIntervalName { - id: i32, - fs_name: String, - timestamp: DateTime, -} - -fn parse_snapshot_name(name: &str) -> Option { - match name.trim().splitn(3, '-').collect::>().as_slice() { - [id, fs, ts] => { - let ts = ts.parse::>().ok()?; - let id = id.parse::().ok()?; - - Some(SnapshotIntervalName { - id, - fs_name: fs.to_string(), - timestamp: ts, - }) - } - _ => None, - } -} - pub(crate) struct MutationRoot; #[juniper::graphql_object(Context = Context)] @@ -679,328 +571,8 @@ impl MutationRoot { fn task(&self) -> task::TaskMutation { task::TaskMutation } - #[graphql(arguments( - fsname(description = "Filesystem to snapshot"), - name(description = "Name of the snapshot"), - comment(description = "A description for the purpose of the snapshot"), - use_barrier( - description = "Set write barrier before creating snapshot. The default value is `false`" - ) - ))] - /// Creates a snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. - /// For the `Command` to succeed, the filesystem being snapshoted must be available. - async fn create_snapshot( - context: &Context, - fsname: String, - name: String, - comment: Option, - use_barrier: Option, - ) -> juniper::FieldResult { - let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; - let name = name.trim(); - validate_snapshot_name(name)?; - - let snapshot_interval_name = parse_snapshot_name(name); - if let Some(data) = snapshot_interval_name { - sqlx::query!( - r#" - UPDATE snapshot_interval - SET last_run=$1 - WHERE id=$2 AND filesystem_name=$3 - "#, - data.timestamp, - data.id, - data.fs_name, - ) - .execute(&context.pg_pool) - .await?; - } - - let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) - .await? - .ok_or_else(|| { - FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) - })?; - - let kwargs: HashMap = vec![("message".into(), "Creating snapshot".into())] - .into_iter() - .collect(); - - let jobs = serde_json::json!([{ - "class_name": "CreateSnapshotJob", - "args": { - "fsname": fsname, - "name": name, - "comment": comment, - "fqdn": active_mgs_host_fqdn, - "use_barrier": use_barrier.unwrap_or(false), - } - }]); - let command_id: i32 = iml_job_scheduler_rpc::call( - &context.rabbit_pool.get().await?, - "run_jobs", - vec![jobs], - Some(kwargs), - ) - .map_err(ImlApiError::ImlJobSchedulerRpcError) - .await?; - - let command = get_command(&context.pg_pool, command_id).await?; - - Ok(command) - } - #[graphql(arguments( - fsname(description = "Filesystem snapshot was taken from"), - name(description = "Name of the snapshot"), - force(description = "Destroy the snapshot by force"), - ))] - /// Destroys an existing snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. - /// For the `Command` to succeed, the filesystem must be available. 
- async fn destroy_snapshot( - context: &Context, - fsname: String, - name: String, - force: bool, - ) -> juniper::FieldResult { - let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; - let name = name.trim(); - validate_snapshot_name(name)?; - - let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) - .await? - .ok_or_else(|| { - FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) - })?; - - let kwargs: HashMap = - vec![("message".into(), "Destroying snapshot".into())] - .into_iter() - .collect(); - - let jobs = serde_json::json!([{ - "class_name": "DestroySnapshotJob", - "args": { - "fsname": fsname, - "name": name, - "force": force, - "fqdn": active_mgs_host_fqdn, - } - }]); - let command_id: i32 = iml_job_scheduler_rpc::call( - &context.rabbit_pool.get().await?, - "run_jobs", - vec![jobs], - Some(kwargs), - ) - .map_err(ImlApiError::ImlJobSchedulerRpcError) - .await?; - - let command = get_command(&context.pg_pool, command_id).await?; - - Ok(command) - } - #[graphql(arguments( - fsname(description = "Filesystem snapshot was taken from"), - name(description = "Name of the snapshot"), - ))] - /// Mounts an existing snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. - /// For the `Command` to succeed, the filesystem must be available. - async fn mount_snapshot( - context: &Context, - fsname: String, - name: String, - ) -> juniper::FieldResult { - let name = name.trim(); - validate_snapshot_name(name)?; - - let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) - .await? - .ok_or_else(|| { - FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) - })?; - - let kwargs: HashMap = vec![("message".into(), "Mounting snapshot".into())] - .into_iter() - .collect(); - - let jobs = serde_json::json!([{ - "class_name": "MountSnapshotJob", - "args": { - "fsname": fsname, - "name": name, - "fqdn": active_mgs_host_fqdn, - } - }]); - let command_id: i32 = iml_job_scheduler_rpc::call( - &context.rabbit_pool.get().await?, - "run_jobs", - vec![jobs], - Some(kwargs), - ) - .map_err(ImlApiError::ImlJobSchedulerRpcError) - .await?; - - get_command(&context.pg_pool, command_id) - .await - .map_err(|e| e.into()) - } - #[graphql(arguments( - fsname(description = "Filesystem snapshot was taken from"), - name(description = "Name of the snapshot"), - ))] - /// Unmounts an existing snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. - /// For the `Command` to succeed, the filesystem must be available. - async fn unmount_snapshot( - context: &Context, - fsname: String, - name: String, - ) -> juniper::FieldResult { - let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; - let name = name.trim(); - validate_snapshot_name(name)?; - - let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) - .await? 
- .ok_or_else(|| { - FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) - })?; - - let kwargs: HashMap = - vec![("message".into(), "Unmounting snapshot".into())] - .into_iter() - .collect(); - - let jobs = serde_json::json!([{ - "class_name": "UnmountSnapshotJob", - "args": { - "fsname": fsname, - "name": name, - "fqdn": active_mgs_host_fqdn, - } - }]); - let command_id: i32 = iml_job_scheduler_rpc::call( - &context.rabbit_pool.get().await?, - "run_jobs", - vec![jobs], - Some(kwargs), - ) - .map_err(ImlApiError::ImlJobSchedulerRpcError) - .await?; - - get_command(&context.pg_pool, command_id) - .await - .map_err(|e| e.into()) - } - #[graphql(arguments( - fsname(description = "The filesystem to create snapshots with"), - interval(description = "How often a snapshot should be taken"), - use_barrier( - description = "Set write barrier before creating snapshot. The default value is `false`" - ), - ))] - /// Creates a new snapshot interval. - /// A recurring snapshot will be taken once the given `interval` expires for the given `fsname`. - /// In order for the snapshot to be successful, the filesystem must be available. - async fn create_snapshot_interval( - context: &Context, - fsname: String, - interval: GraphQLDuration, - use_barrier: Option, - ) -> juniper::FieldResult { - let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; - let maybe_id = sqlx::query!( - r#" - INSERT INTO snapshot_interval ( - filesystem_name, - use_barrier, - interval - ) - VALUES ($1, $2, $3) - ON CONFLICT (filesystem_name, interval) - DO NOTHING - RETURNING id - "#, - fsname, - use_barrier.unwrap_or_default(), - PgInterval::try_from(interval.0)?, - ) - .fetch_optional(&context.pg_pool) - .await? - .map(|x| x.id); - - if let Some(id) = maybe_id { - configure_snapshot_timer(id, fsname, interval.0, use_barrier.unwrap_or_default()) - .await?; - } - - Ok(true) - } - /// Removes an existing snapshot interval. - /// This will also cancel any outstanding intervals scheduled by this rule. - #[graphql(arguments(id(description = "The snapshot interval id"),))] - async fn remove_snapshot_interval(context: &Context, id: i32) -> juniper::FieldResult { - sqlx::query!("DELETE FROM snapshot_interval WHERE id=$1", id) - .execute(&context.pg_pool) - .await?; - - remove_snapshot_timer(id).await?; - - Ok(true) - } - #[graphql(arguments( - fsname(description = "Filesystem name"), - reserve_value( - description = "Delete the oldest snapshot when available space falls below this value" - ), - reserve_unit(description = "The unit of measurement associated with the reserve_value"), - keep_num( - description = "The minimum number of snapshots to keep. This is to avoid deleting all snapshots while pursuiting the reserve goal" - ) - ))] - /// Creates a new snapshot retention policy for the given `fsname`. - /// Snapshots will automatically be deleted (starting with the oldest) - /// when free space falls below the defined reserve value and its associated unit. 
- async fn create_snapshot_retention( - context: &Context, - fsname: String, - reserve_value: i32, - reserve_unit: ReserveUnit, - keep_num: Option, - ) -> juniper::FieldResult { - let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; - sqlx::query!( - r#" - INSERT INTO snapshot_retention ( - filesystem_name, - reserve_value, - reserve_unit, - keep_num - ) - VALUES ($1, $2, $3, $4) - ON CONFLICT (filesystem_name) - DO UPDATE SET - reserve_value = EXCLUDED.reserve_value, - reserve_unit = EXCLUDED.reserve_unit, - keep_num = EXCLUDED.keep_num - "#, - fsname, - reserve_value, - reserve_unit as ReserveUnit, - keep_num.unwrap_or(0) - ) - .execute(&context.pg_pool) - .await?; - - Ok(true) - } - /// Remove an existing snapshot retention policy. - #[graphql(arguments(id(description = "The snapshot retention policy id")))] - async fn remove_snapshot_retention(context: &Context, id: i32) -> juniper::FieldResult { - sqlx::query!("DELETE FROM snapshot_retention WHERE id=$1", id) - .execute(&context.pg_pool) - .await?; - - Ok(true) + fn snapshot(&self) -> snapshot::SnapshotMutation { + snapshot::SnapshotMutation } /// Create a server profile. @@ -1343,32 +915,6 @@ async fn get_banned_resources(pool: &PgPool) -> Result, ImlA Ok(xs) } -fn validate_snapshot_name(x: &str) -> Result<(), FieldError> { - if x.contains(' ') { - Err(FieldError::new( - "Snapshot name cannot contain spaces", - Value::null(), - )) - } else if x.contains('*') { - Err(FieldError::new( - "Snapshot name cannot contain * character", - Value::null(), - )) - } else if x.contains('/') { - Err(FieldError::new( - "Snapshot name cannot contain / character", - Value::null(), - )) - } else if x.contains('&') { - Err(FieldError::new( - "Snapshot name cannot contain & character", - Value::null(), - )) - } else { - Ok(()) - } -} - async fn fs_id_by_name(pool: &PgPool, name: &str) -> Result { sqlx::query!( "SELECT id FROM chroma_core_managedfilesystem WHERE name=$1 and not_deleted = 't'", diff --git a/iml-api/src/graphql/snapshot.rs b/iml-api/src/graphql/snapshot.rs new file mode 100644 index 0000000000..c7a66ff83e --- /dev/null +++ b/iml-api/src/graphql/snapshot.rs @@ -0,0 +1,397 @@ +// Copyright (c) 2020 DDN. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+use crate::{ + command::get_command, + error::ImlApiError, + graphql::{fs_id_by_name, Context}, +}; +use chrono::{DateTime, Utc}; +use futures::{TryFutureExt, TryStreamExt}; +use iml_postgres::{active_mgs_host_fqdn, sqlx, sqlx::postgres::types::PgInterval}; +use iml_wire_types::{ + graphql_duration::GraphQLDuration, + snapshot::{Snapshot, SnapshotPolicy}, + Command, SortDir, +}; +use juniper::{FieldError, Value}; +use std::{collections::HashMap, convert::TryFrom as _, ops::Deref}; + +pub(crate) struct SnapshotQuery; + +#[juniper::graphql_object(Context = Context)] +impl SnapshotQuery { + #[graphql(arguments( + limit(description = "optional paging limit, defaults to all rows"), + offset(description = "Offset into items, defaults to 0"), + dir(description = "Sort direction, defaults to ASC"), + fsname(description = "Filesystem the snapshot was taken from"), + name(description = "Name of the snapshot"), + ))] + /// Fetch the list of snapshots + async fn snapshots( + context: &Context, + limit: Option, + offset: Option, + dir: Option, + fsname: String, + name: Option, + ) -> juniper::FieldResult> { + let dir = dir.unwrap_or_default(); + + let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; + + let snapshots = sqlx::query_as!( + Snapshot, + r#" + SELECT filesystem_name, snapshot_name, create_time, modify_time, snapshot_fsname, mounted, comment FROM snapshot s + WHERE filesystem_name = $4 AND ($5::text IS NULL OR snapshot_name = $5) + ORDER BY + CASE WHEN $3 = 'ASC' THEN s.create_time END ASC, + CASE WHEN $3 = 'DESC' THEN s.create_time END DESC + OFFSET $1 LIMIT $2"#, + offset.unwrap_or_default() as i64, + limit.map(|x| x as i64), + dir.deref(), + fsname, + name, + ) + .fetch_all(&context.pg_pool) + .await?; + + Ok(snapshots) + } + + /// List automatic snapshot policies. + async fn snapshot_policies(context: &Context) -> juniper::FieldResult> { + let xs = sqlx::query!(r#"SELECT * FROM snapshot_policy"#) + .fetch(&context.pg_pool) + .map_ok(|x| SnapshotPolicy { + id: x.id, + filesystem: x.filesystem, + interval: x.interval.into(), + barrier: x.barrier, + start: x.start, + keep: x.keep, + daily: x.daily, + weekly: x.weekly, + monthly: x.monthly, + last_run: x.last_run, + }) + .try_collect() + .await?; + + Ok(xs) + } +} + +pub(crate) struct SnapshotMutation; + +#[juniper::graphql_object(Context = Context)] +impl SnapshotMutation { + #[graphql(arguments( + fsname(description = "Filesystem to snapshot"), + name(description = "Name of the snapshot"), + comment(description = "A description for the purpose of the snapshot"), + use_barrier( + description = "Set write barrier before creating snapshot. The default value is `false`" + ) + ))] + /// Creates a snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. + /// For the `Command` to succeed, the filesystem being snapshoted must be available. + async fn create_snapshot( + context: &Context, + fsname: String, + name: String, + comment: Option, + use_barrier: Option, + ) -> juniper::FieldResult { + let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; + let name = name.trim(); + validate_snapshot_name(name)?; + + let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) + .await? 
+ .ok_or_else(|| { + FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) + })?; + + let kwargs: HashMap = vec![("message".into(), "Creating snapshot".into())] + .into_iter() + .collect(); + + let jobs = serde_json::json!([{ + "class_name": "CreateSnapshotJob", + "args": { + "fsname": fsname, + "name": name, + "comment": comment, + "fqdn": active_mgs_host_fqdn, + "use_barrier": use_barrier.unwrap_or_default(), + } + }]); + let command_id: i32 = iml_job_scheduler_rpc::call( + &context.rabbit_pool.get().await?, + "run_jobs", + vec![jobs], + Some(kwargs), + ) + .map_err(ImlApiError::ImlJobSchedulerRpcError) + .await?; + + let command = get_command(&context.pg_pool, command_id).await?; + + Ok(command) + } + #[graphql(arguments( + fsname(description = "Filesystem snapshot was taken from"), + name(description = "Name of the snapshot"), + force(description = "Destroy the snapshot by force"), + ))] + /// Destroys an existing snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. + /// For the `Command` to succeed, the filesystem must be available. + async fn destroy_snapshot( + context: &Context, + fsname: String, + name: String, + force: bool, + ) -> juniper::FieldResult { + let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; + let name = name.trim(); + validate_snapshot_name(name)?; + + let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) + .await? + .ok_or_else(|| { + FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) + })?; + + let kwargs: HashMap = + vec![("message".into(), "Destroying snapshot".into())] + .into_iter() + .collect(); + + let jobs = serde_json::json!([{ + "class_name": "DestroySnapshotJob", + "args": { + "fsname": fsname, + "name": name, + "force": force, + "fqdn": active_mgs_host_fqdn, + } + }]); + let command_id: i32 = iml_job_scheduler_rpc::call( + &context.rabbit_pool.get().await?, + "run_jobs", + vec![jobs], + Some(kwargs), + ) + .map_err(ImlApiError::ImlJobSchedulerRpcError) + .await?; + + let command = get_command(&context.pg_pool, command_id).await?; + + Ok(command) + } + #[graphql(arguments( + fsname(description = "Filesystem snapshot was taken from"), + name(description = "Name of the snapshot"), + ))] + /// Mounts an existing snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. + /// For the `Command` to succeed, the filesystem must be available. + async fn mount_snapshot( + context: &Context, + fsname: String, + name: String, + ) -> juniper::FieldResult { + let name = name.trim(); + validate_snapshot_name(name)?; + + let active_mgs_host_fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) + .await? 
+ .ok_or_else(|| { + FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) + })?; + + let kwargs: HashMap = vec![("message".into(), "Mounting snapshot".into())] + .into_iter() + .collect(); + + let jobs = serde_json::json!([{ + "class_name": "MountSnapshotJob", + "args": { + "fsname": fsname, + "name": name, + "fqdn": active_mgs_host_fqdn, + } + }]); + let command_id: i32 = iml_job_scheduler_rpc::call( + &context.rabbit_pool.get().await?, + "run_jobs", + vec![jobs], + Some(kwargs), + ) + .map_err(ImlApiError::ImlJobSchedulerRpcError) + .await?; + + get_command(&context.pg_pool, command_id) + .await + .map_err(|e| e.into()) + } + #[graphql(arguments( + fsname(description = "Filesystem snapshot was taken from"), + name(description = "Name of the snapshot"), + ))] + /// Unmounts an existing snapshot of an existing Lustre filesystem. Returns a `Command` to track progress. + /// For the `Command` to succeed, the filesystem must be available. + async fn unmount_snapshot( + context: &Context, + fsname: String, + name: String, + ) -> juniper::FieldResult { + let _ = fs_id_by_name(&context.pg_pool, &fsname).await?; + let name = name.trim(); + validate_snapshot_name(name)?; + + let fqdn = active_mgs_host_fqdn(&fsname, &context.pg_pool) + .await? + .ok_or_else(|| { + FieldError::new("Filesystem not found or MGS is not mounted", Value::null()) + })?; + + let kwargs: HashMap = + vec![("message".into(), "Unmounting snapshot".into())] + .into_iter() + .collect(); + + let jobs = serde_json::json!([{ + "class_name": "UnmountSnapshotJob", + "args": { + "fsname": fsname, + "name": name, + "fqdn": fqdn, + } + }]); + let command_id: i32 = iml_job_scheduler_rpc::call( + &context.rabbit_pool.get().await?, + "run_jobs", + vec![jobs], + Some(kwargs), + ) + .map_err(ImlApiError::ImlJobSchedulerRpcError) + .await?; + + get_command(&context.pg_pool, command_id) + .await + .map_err(|e| e.into()) + } + + #[graphql(arguments( + filesystem(description = "The filesystem to create snapshots with"), + interval(description = "How often a snapshot should be taken"), + use_barrier( + description = "Set write barrier before creating snapshot. The default value is `false`" + ), + keep(description = "Number of the most recent snapshots to keep"), + daily(description = "The number of days when keep the most recent snapshot of each day"), + weekly( + description = "The number of weeks when keep the most recent snapshot of each week" + ), + monthly( + description = "The number of months when keep the most recent snapshot of each month" + ), + ))] + /// Create an automatic snapshot policy. 
+ async fn create_snapshot_policy( + context: &Context, + filesystem: String, + interval: GraphQLDuration, + start: Option>, + barrier: Option, + keep: i32, + daily: Option, + weekly: Option, + monthly: Option, + ) -> juniper::FieldResult { + sqlx::query!( + r#" + INSERT INTO snapshot_policy ( + filesystem, + interval, + start, + barrier, + keep, + daily, + weekly, + monthly + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (filesystem) + DO UPDATE SET + interval = EXCLUDED.interval, + start = EXCLUDED.start, + barrier = EXCLUDED.barrier, + keep = EXCLUDED.keep, + daily = EXCLUDED.daily, + weekly = EXCLUDED.weekly, + monthly = EXCLUDED.monthly + "#, + filesystem, + PgInterval::try_from(interval.0)?, + start, + barrier.unwrap_or_default(), + keep, + daily.unwrap_or_default(), + weekly.unwrap_or_default(), + monthly.unwrap_or_default() + ) + .execute(&context.pg_pool) + .await?; + + Ok(true) + } + + #[graphql(arguments(filesystem( + description = "The filesystem to remove snapshot policies for" + ),))] + /// Removes the automatic snapshot policy. + async fn remove_snapshot_policy( + context: &Context, + filesystem: String, + ) -> juniper::FieldResult { + sqlx::query!( + "DELETE FROM snapshot_policy WHERE filesystem = $1", + filesystem + ) + .execute(&context.pg_pool) + .await?; + + Ok(true) + } +} + +fn validate_snapshot_name(x: &str) -> Result<(), FieldError> { + if x.contains(' ') { + Err(FieldError::new( + "Snapshot name cannot contain spaces", + Value::null(), + )) + } else if x.contains('*') { + Err(FieldError::new( + "Snapshot name cannot contain * character", + Value::null(), + )) + } else if x.contains('/') { + Err(FieldError::new( + "Snapshot name cannot contain / character", + Value::null(), + )) + } else if x.contains('&') { + Err(FieldError::new( + "Snapshot name cannot contain & character", + Value::null(), + )) + } else { + Ok(()) + } +} diff --git a/iml-api/src/main.rs b/iml-api/src/main.rs index 6e4b078361..2a9dd03bdf 100644 --- a/iml-api/src/main.rs +++ b/iml-api/src/main.rs @@ -6,7 +6,6 @@ mod action; mod command; mod error; mod graphql; -mod timer; use iml_manager_env::get_pool_limit; use iml_postgres::get_db_pool; diff --git a/iml-api/src/timer.rs b/iml-api/src/timer.rs deleted file mode 100644 index 615e6ad48a..0000000000 --- a/iml-api/src/timer.rs +++ /dev/null @@ -1,105 +0,0 @@ -use crate::error::ImlApiError; -use iml_manager_client::{delete, get_client, put}; -use iml_manager_env::{get_timer_addr, running_in_docker}; -use std::time::Duration; - -#[derive(serde::Serialize, Debug)] -pub struct TimerConfig { - config_id: String, - file_prefix: String, - timer_config: String, - service_config: String, -} - -pub async fn configure_snapshot_timer( - config_id: i32, - fsname: String, - interval: Duration, - use_barrier: bool, -) -> Result<(), ImlApiError> { - let iml_cmd = format!( - r#"/bin/bash -c "/usr/bin/date +\"%%Y-%%m-%%dT%%TZ\" | xargs -I %% /usr/bin/iml snapshot create {} -c 'automatically created by IML' {} {}-{}-%%""#, - if use_barrier { "-b" } else { "" }, - fsname, - config_id, - fsname - ); - - let timer_config = format!( - r#"# Automatically created by IML - -[Unit] -Description=Create snapshot on filesystem {} - -[Timer] -OnActiveSec={} -OnUnitActiveSec={} -AccuracySec=1us -Persistent=true - -[Install] -WantedBy=timers.target -"#, - fsname, - interval.as_secs(), - interval.as_secs() - ); - - let service_config = format!( - r#"# Automatically created by IML - -[Unit] -Description=Create snapshot on filesystem {} -{} - -[Service] -Type=oneshot 
-EnvironmentFile=/var/lib/chroma/iml-settings.conf -ExecStart={} -"#, - fsname, - if !running_in_docker() { - "After=iml-manager.target" - } else { - "" - }, - iml_cmd, - ); - - let config = TimerConfig { - config_id: config_id.to_string(), - file_prefix: "iml-snapshot".to_string(), - timer_config, - service_config, - }; - - let client = get_client()?; - - let url = format!("http://{}/configure/", get_timer_addr()); - tracing::debug!( - "Sending snapshot interval config to timer service: {:?} {:?}", - url, - config - ); - put(client, url.as_str(), config).await?; - - Ok(()) -} - -pub async fn remove_snapshot_timer(config_id: i32) -> Result<(), ImlApiError> { - let client = get_client()?; - - delete( - client, - format!( - "http://{}/unconfigure/iml-snapshot/{}", - get_timer_addr(), - config_id - ) - .as_str(), - serde_json::json!("{}"), - ) - .await?; - - Ok(()) -} diff --git a/iml-graphql-queries/src/snapshot.rs b/iml-graphql-queries/src/snapshot.rs index 54795d4b87..c644bf2501 100644 --- a/iml-graphql-queries/src/snapshot.rs +++ b/iml-graphql-queries/src/snapshot.rs @@ -2,25 +2,32 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. +#[derive(Debug, Clone, serde::Deserialize)] +pub struct Resp { + pub snapshot: T, +} + pub mod create { use crate::Query; use iml_wire_types::Command; pub static QUERY: &str = r#" - mutation CreateSnapshot($fsname: String!, $name: String!, $comment: String, $use_barrier: Boolean) { - createSnapshot(fsname: $fsname, name: $name, comment: $comment, useBarrier: $use_barrier) { - cancelled - complete - created_at: createdAt - errored - id - jobs - logs - message - resource_uri: resourceUri - } + mutation CreateSnapshot($fsname: String!, $name: String!, $comment: String, $use_barrier: Boolean) { + snapshot { + createSnapshot(fsname: $fsname, name: $name, comment: $comment, useBarrier: $use_barrier) { + cancelled + complete + created_at: createdAt + errored + id + jobs + logs + message + resource_uri: resourceUri + } } - "#; + } + "#; #[derive(Debug, serde::Serialize)] pub struct Vars { @@ -48,10 +55,12 @@ pub mod create { } #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { + pub struct CreateSnapshot { #[serde(rename(deserialize = "createSnapshot"))] - pub create_snapshot: Command, + pub create: Command, } + + pub type Resp = super::Resp; } pub mod destroy { @@ -60,16 +69,18 @@ pub mod destroy { pub static QUERY: &str = r#" mutation DestroySnapshot($fsname: String!, $name: String!, $force: Boolean!) { - destroySnapshot(fsname: $fsname, name: $name, force: $force) { - cancelled - complete - created_at: createdAt - errored - id - jobs - logs - message - resource_uri: resourceUri + snapshot { + destroySnapshot(fsname: $fsname, name: $name, force: $force) { + cancelled + complete + created_at: createdAt + errored + id + jobs + logs + message + resource_uri: resourceUri + } } } "#; @@ -93,10 +104,12 @@ pub mod destroy { } #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { + pub struct DestroySnapshot { #[serde(rename(deserialize = "destroySnapshot"))] - pub destroy_snapshot: Command, + pub destroy: Command, } + + pub type Resp = super::Resp; } pub mod mount { @@ -105,17 +118,19 @@ pub mod mount { pub static QUERY: &str = r#" mutation MountSnapshot($fsname: String!, $name: String!) 
{ - mountSnapshot(fsname: $fsname, name: $name) { - cancelled - complete - created_at: createdAt - errored - id - jobs - logs - message - resource_uri: resourceUri - } + snapshot { + mountSnapshot(fsname: $fsname, name: $name) { + cancelled + complete + created_at: createdAt + errored + id + jobs + logs + message + resource_uri: resourceUri + } + } } "#; @@ -136,10 +151,12 @@ pub mod mount { } #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { + pub struct MountSnapshot { #[serde(rename(deserialize = "mountSnapshot"))] - pub mount_snapshot: Command, + pub mount: Command, } + + pub type Resp = super::Resp; } pub mod unmount { @@ -148,17 +165,19 @@ pub mod unmount { pub static QUERY: &str = r#" mutation UnmountSnapshot($fsname: String!, $name: String!) { - unmountSnapshot(fsname: $fsname, name: $name) { - cancelled - complete - created_at: createdAt - errored - id - jobs - logs - message - resource_uri: resourceUri - } + snapshot { + unmountSnapshot(fsname: $fsname, name: $name) { + cancelled + complete + created_at: createdAt + errored + id + jobs + logs + message + resource_uri: resourceUri + } + } } "#; @@ -179,10 +198,12 @@ pub mod unmount { } #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { + pub struct UnmountSnapshot { #[serde(rename(deserialize = "unmountSnapshot"))] - pub unmount_snapshot: Command, + pub unmount: Command, } + + pub type Resp = super::Resp; } pub mod list { @@ -191,15 +212,17 @@ pub mod list { pub static QUERY: &str = r#" query Snapshots($fsname: String!, $name: String, $dir: SortDir, $offset: Int, $limit: Int) { - snapshots(fsname: $fsname, name: $name, dir: $dir, offset: $offset, limit: $limit) { - comment - create_time: createTime - filesystem_name: filesystemName - modify_time: modifyTime - mounted - snapshot_fsname: snapshotFsname - snapshot_name: snapshotName - } + snapshot { + snapshots(fsname: $fsname, name: $name, dir: $dir, offset: $offset, limit: $limit) { + comment + create_time: createTime + filesystem_name: filesystemName + modify_time: modifyTime + mounted + snapshot_fsname: snapshotFsname + snapshot_name: snapshotName + } + } } "#; @@ -232,211 +255,126 @@ pub mod list { } #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { + pub struct ListSnapshots { #[serde(rename(deserialize = "snapshots"))] pub snapshots: Vec, } -} - -pub mod create_interval { - use crate::Query; - - pub static QUERY: &str = r#" - mutation CreateSnapshotInterval($fsname: String!, $interval: Duration!, $use_barrier: Boolean) { - createSnapshotInterval(fsname: $fsname, interval: $interval, useBarrier: $use_barrier) - } - "#; - - #[derive(Debug, serde::Serialize)] - pub struct Vars { - fsname: String, - interval: String, - use_barrier: Option, - } - - pub fn build( - fsname: impl ToString, - interval: String, - use_barrier: Option, - ) -> Query { - Query { - query: QUERY.to_string(), - variables: Some(Vars { - fsname: fsname.to_string(), - interval, - use_barrier, - }), - } - } - #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { - #[serde(rename(deserialize = "createSnapshotInterval"))] - pub create_snapshot_interval: bool, - } + pub type Resp = super::Resp; } -pub mod remove_interval { - use crate::Query; +pub mod policy { + pub mod list { + use crate::Query; + use iml_wire_types::snapshot::SnapshotPolicy; + + pub static QUERY: &str = r#" + query SnapshotPolicies { + snapshot { + snapshotPolicies { + id + filesystem + interval + start + barrier + keep + daily + weekly + monthly + last_run: lastRun + } + } + } + "#; - pub static 
QUERY: &str = r#" - mutation RemoveSnapshotInterval($id: Int!) { - removeSnapshotInterval(id: $id) + pub fn build() -> Query<()> { + Query { + query: QUERY.to_string(), + variables: None, + } } - "#; - #[derive(Debug, serde::Serialize)] - pub struct Vars { - id: i32, - } - - pub fn build(id: i32) -> Query { - Query { - query: QUERY.to_string(), - variables: Some(Vars { id }), + #[derive(Debug, Clone, serde::Deserialize)] + pub struct ListPolicies { + #[serde(rename(deserialize = "snapshotPolicies"))] + pub policies: Vec, } - } - #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { - #[serde(rename(deserialize = "removeSnapshotInterval"))] - pub remove_snapshot_interval: bool, + pub type Resp = super::super::Resp; } -} -pub mod list_intervals { - use crate::Query; - use iml_wire_types::snapshot::SnapshotInterval; + pub mod create { + use crate::Query; - pub static QUERY: &str = r#" - query SnapshotIntervals { - snapshotIntervals { - id - filesystem_name: filesystemName - use_barrier: useBarrier - interval - last_run: lastRun - } - } - "#; + pub static QUERY: &str = r#" + mutation CreateSnapshotPolicy($filesystem: String!, $interval: Duration!, $barrier: Boolean, $start: DateTimeUtc, + $keep: Int!, $daily: Int, $weekly: Int, $monthly: Int) { + snapshot { + createSnapshotPolicy(filesystem: $filesystem, interval: $interval, barrier: $barrier, start: $start, + keep: $keep, daily: $daily, weekly: $weekly, monthly: $monthly) + } + } + "#; - pub fn build() -> Query<()> { - Query { - query: QUERY.to_string(), - variables: None, + #[derive(Debug, serde::Serialize, Default, Clone)] + pub struct Vars { + pub filesystem: String, + pub interval: String, + pub start: Option, + pub barrier: Option, + pub keep: i32, + pub daily: Option, + pub weekly: Option, + pub monthly: Option, } - } - #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { - #[serde(rename(deserialize = "snapshotIntervals"))] - pub snapshot_intervals: Vec, - } -} - -/// Graphql query to create a new retention. Note that -/// Snapshots will automatically be deleted (starting with the oldest) -/// when free space falls below the defined reserve value and its associated unit. 
-pub mod create_retention { - use crate::Query; - use iml_wire_types::snapshot::ReserveUnit; - - pub static QUERY: &str = r#" - mutation CreateSnapshotRetention($fsname: String!, $reserve_value: Int!, $reserve_unit: ReserveUnit!, $keep_num: Int) { - createSnapshotRetention(fsname: $fsname, reserveValue: $reserve_value, reserveUnit: $reserve_unit, keepNum: $keep_num) + pub fn build(vars: Vars) -> Query { + Query { + query: QUERY.to_string(), + variables: Some(vars), + } } - "#; - #[derive(Debug, serde::Serialize)] - pub struct Vars { - fsname: String, - reserve_value: u32, - reserve_unit: ReserveUnit, - keep_num: Option, - } - - pub fn build( - fsname: impl ToString, - reserve_value: u32, - reserve_unit: ReserveUnit, - keep_num: Option, - ) -> Query { - Query { - query: QUERY.to_string(), - variables: Some(Vars { - fsname: fsname.to_string(), - reserve_value, - reserve_unit, - keep_num, - }), + #[derive(Debug, Clone, serde::Deserialize)] + pub struct CreateSnapshotPolicy { + #[serde(rename(deserialize = "createSnapshotPolicy"))] + pub created: bool, } - } - #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { - #[serde(rename(deserialize = "createSnapshotRetention"))] - pub create_snapshot_retention: bool, + pub type Resp = super::super::Resp; } -} - -pub mod remove_retention { - use crate::Query; - pub static QUERY: &str = r#" - mutation RemoveSnapshotRetention($id: Int!) { - removeSnapshotRetention(id: $id) - } - "#; + pub mod remove { + use crate::Query; - #[derive(Debug, serde::Serialize)] - pub struct Vars { - id: i32, - } + pub static QUERY: &str = r#" + mutation RemoveSnapshotPolicy($filesystem: String!) { + snapshot { + removeSnapshotPolicy(filesystem: $filesystem) + } + } + "#; - pub fn build(id: i32) -> Query { - Query { - query: QUERY.to_string(), - variables: Some(Vars { id }), + #[derive(Debug, serde::Serialize, Default)] + pub struct Vars { + filesystem: String, } - } - - #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { - #[serde(rename(deserialize = "removeSnapshotRetention"))] - pub remove_snapshot_retention: bool, - } -} - -/// Graphql query to list retentions. For each retention, snapshots will automatically -/// be deleted (starting with the oldest) when free space falls below the defined reserve -/// value and its associated unit. 
-pub mod list_retentions { - use crate::Query; - use iml_wire_types::snapshot::SnapshotRetention; - pub static QUERY: &str = r#" - query SnapshotRetentionPolicies { - snapshotRetentionPolicies { - id - filesystem_name: filesystemName - reserve_value: reserveValue - reserve_unit: reserveUnit - keep_num: keepNum - last_run: lastRun - } + pub fn build(filesystem: impl ToString) -> Query { + Query { + query: QUERY.to_string(), + variables: Some(Vars { + filesystem: filesystem.to_string(), + }), + } } - "#; - pub fn build() -> Query<()> { - Query { - query: QUERY.to_string(), - variables: None, + #[derive(Debug, Clone, serde::Deserialize)] + pub struct RemoveSnapshotPolicy { + #[serde(rename(deserialize = "removeSnapshotPolicy"))] + pub removed: bool, } - } - #[derive(Debug, Clone, serde::Deserialize)] - pub struct Resp { - #[serde(rename(deserialize = "snapshotRetentionPolicies"))] - pub snapshot_retention_policies: Vec, + pub type Resp = super::super::Resp; } } diff --git a/iml-gui/crate/src/lib.rs b/iml-gui/crate/src/lib.rs index 611826b78d..a9c1530477 100644 --- a/iml-gui/crate/src/lib.rs +++ b/iml-gui/crate/src/lib.rs @@ -682,11 +682,8 @@ fn handle_record_change( ArcRecord::Snapshot(x) => { model.records.snapshot.insert(x.id, Arc::clone(&x)); } - ArcRecord::SnapshotInterval(x) => { - model.records.snapshot_interval.insert(x.id, Arc::clone(&x)); - } - ArcRecord::SnapshotRetention(x) => { - model.records.snapshot_retention.insert(x.id, Arc::clone(&x)); + ArcRecord::SnapshotPolicy(x) => { + model.records.snapshot_policy.insert(x.id, Arc::clone(&x)); } ArcRecord::StratagemConfig(x) => { model.records.stratagem_config.insert(x.id, Arc::clone(&x)); diff --git a/iml-gui/crate/src/page/snapshot/add_interval.rs b/iml-gui/crate/src/page/snapshot/add_interval.rs deleted file mode 100644 index a5afcabf1c..0000000000 --- a/iml-gui/crate/src/page/snapshot/add_interval.rs +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright (c) 2020 DDN. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
- -use crate::{ - components::{font_awesome, form, modal, Placement}, - extensions::{MergeAttrs as _, NodeExt as _}, - generated::css_classes::C, - key_codes, - page::{ - snapshot::{get_fs_names, help_indicator}, - RecordChange, - }, - GMsg, RequestExt, -}; -use iml_graphql_queries::{snapshot, Response}; -use iml_wire_types::{warp_drive::ArcCache, warp_drive::ArcRecord, warp_drive::RecordId, Filesystem}; -use seed::{prelude::*, *}; -use std::sync::Arc; - -#[derive(Debug)] -pub struct Model { - submitting: bool, - filesystems: Vec>, - fs_name: String, - barrier: bool, - interval_value: String, - interval_unit: String, - pub modal: modal::Model, -} - -impl Default for Model { - fn default() -> Self { - Model { - submitting: false, - filesystems: vec![], - fs_name: "".into(), - barrier: false, - interval_value: "".into(), - interval_unit: "d".into(), - modal: modal::Model::default(), - } - } -} - -impl RecordChange for Model { - fn update_record(&mut self, _: ArcRecord, cache: &ArcCache, orders: &mut impl Orders) { - orders.send_msg(Msg::SetFilesystems(cache.filesystem.values().cloned().collect())); - } - fn remove_record(&mut self, _: RecordId, cache: &ArcCache, orders: &mut impl Orders) { - orders.send_msg(Msg::SetFilesystems(cache.filesystem.values().cloned().collect())); - - let present = cache.filesystem.values().any(|x| x.name == self.fs_name); - - if !present { - let x = get_fs_names(cache).into_iter().next().unwrap_or_default(); - orders.send_msg(Msg::FsNameChanged(x)); - } - } - fn set_records(&mut self, cache: &ArcCache, orders: &mut impl Orders) { - orders.send_msg(Msg::SetFilesystems(cache.filesystem.values().cloned().collect())); - - let x = get_fs_names(cache).into_iter().next().unwrap_or_default(); - orders.send_msg(Msg::FsNameChanged(x)); - } -} - -#[derive(Clone, Debug)] -pub enum Msg { - Modal(modal::Msg), - Open, - Close, - SetFilesystems(Vec>), - BarrierChanged(String), - FsNameChanged(String), - IntervalValueChanged(String), - IntervalUnitChanged(String), - Submit, - SnapshotCreateIntervalResp(fetch::ResponseDataResult>), - Noop, -} - -pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) { - match msg { - Msg::Modal(msg) => { - modal::update(msg, &mut model.modal, &mut orders.proxy(Msg::Modal)); - } - Msg::Open => { - model.modal.open = true; - } - Msg::Close => { - model.modal.open = false; - } - Msg::SetFilesystems(x) => { - model.filesystems = x; - } - Msg::FsNameChanged(x) => { - model.fs_name = x; - } - Msg::BarrierChanged(_) => { - model.barrier = !model.barrier; - } - Msg::IntervalValueChanged(x) => { - model.interval_value = x; - } - Msg::IntervalUnitChanged(x) => { - model.interval_unit = x; - } - Msg::Submit => { - model.submitting = true; - - let interval = format!("{}{}", model.interval_value.trim(), model.interval_unit); - - let query = snapshot::create_interval::build(&model.fs_name, interval, Some(model.barrier)); - - let req = fetch::Request::graphql_query(&query); - - orders.perform_cmd(req.fetch_json_data(|x| Msg::SnapshotCreateIntervalResp(x))); - } - Msg::SnapshotCreateIntervalResp(x) => { - model.submitting = false; - orders.send_msg(Msg::Close); - - match x { - Ok(Response::Data(_)) => { - *model = Model { - fs_name: model.fs_name.to_string(), - ..Model::default() - }; - } - Ok(Response::Errors(e)) => { - error!("An error has occurred during Snapshot Interval creation: ", e); - } - Err(e) => { - error!("An error has occurred during Snapshot Interval creation: ", e); - } - } - } - Msg::Noop => {} - }; -} - -pub fn view(model: &Model) 
-> Node { - let input_cls = class![ - C.appearance_none, - C.focus__outline_none, - C.focus__shadow_outline, - C.px_3, - C.py_2, - C.rounded_sm - ]; - - modal::bg_view( - model.modal.open, - Msg::Modal, - modal::content_view( - Msg::Modal, - div![ - modal::title_view(Msg::Modal, span!["Add Automated Snapshot Rule"]), - form![ - ev(Ev::Submit, move |event| { - event.prevent_default(); - Msg::Submit - }), - div![ - class![C.grid, C.grid_cols_2, C.gap_4, C.p_4, C.items_center], - label![attrs! {At::For => "interval_fs_name"}, "Filesystem Name"], - div![ - class![C.inline_block, C.relative, C.bg_gray_200], - select![ - id!["interval_fs_name"], - &input_cls, - class![ - C.block, - C.text_gray_800, - C.leading_tight, - C.bg_transparent, - C.pr_8, - C.rounded, - C.w_full - ], - model.filesystems.iter().map(|x| { - let mut opt = option![class![C.font_sans], attrs! {At::Value => x.name}, x.name]; - if x.name == model.fs_name.as_str() { - opt.add_attr(At::Selected.to_string(), "selected"); - } - - opt - }), - attrs! { - At::Required => true.as_at_value(), - }, - input_ev(Ev::Change, Msg::FsNameChanged), - ], - div![ - class![ - C.pointer_events_none, - C.absolute, - C.inset_y_0, - C.right_0, - C.flex, - C.items_center, - C.px_2, - C.text_gray_700, - ], - font_awesome(class![C.w_4, C.h_4, C.inline, C.ml_1], "chevron-down") - ], - ], - label![ - attrs! {At::For => "interval_value"}, - "Interval", - help_indicator("How often to take snapshot for selected filesystem", Placement::Right) - ], - div![ - class![C.grid, C.grid_cols_6], - input![ - &input_cls, - class![C.bg_gray_200, C.text_gray_800, C.col_span_4, C.rounded_r_none], - id!["interval_value"], - attrs! { - At::Type => "number", - At::Min => "1", - At::Placeholder => "Required", - At::Required => true.as_at_value(), - }, - input_ev(Ev::Change, Msg::IntervalValueChanged), - ], - div![ - class![C.inline_block, C.relative, C.col_span_2, C.text_white, C.bg_blue_500], - select![ - id!["interval_unit"], - &input_cls, - class![C.w_full, C.h_full C.rounded_l_none, C.bg_transparent], - option![class![C.font_sans], attrs! {At::Value => "d"}, "Days"], - option![class![C.font_sans], attrs! {At::Value => "y"}, "Years"], - option![class![C.font_sans], attrs! {At::Value => "m"}, "Minutes"], - attrs! { - At::Required => true.as_at_value(), - }, - input_ev(Ev::Change, Msg::IntervalUnitChanged), - ], - div![ - class![ - C.pointer_events_none, - C.absolute, - C.inset_y_0, - C.right_0, - C.flex, - C.items_center, - C.px_2, - C.text_white, - ], - font_awesome(class![C.w_4, C.h_4, C.inline, C.ml_1], "chevron-down") - ] - ], - ], - label![ - attrs! {At::For => "interval_barrier"}, - "Use Barrier", - help_indicator("Set write barrier before creating snapshot", Placement::Right) - ], - form::toggle() - .merge_attrs(id!["interval_barrier"]) - .merge_attrs(attrs! 
{ - At::Checked => model.barrier.as_at_value() - }) - .with_listener(input_ev(Ev::Change, Msg::BarrierChanged)), - ], - modal::footer_view(vec![ - button![ - class![ - C.bg_blue_500, - C.duration_300, - C.flex, - C.form_invalid__bg_gray_500, - C.form_invalid__cursor_not_allowed, - C.form_invalid__pointer_events_none, - C.hover__bg_blue_400, - C.items_center - C.px_4, - C.py_2, - C.rounded_full, - C.text_white, - C.transition_colors, - ], - font_awesome(class![C.h_3, C.w_3, C.mr_1, C.inline], "plus"), - "Add Rule", - ], - button![ - class![ - C.bg_transparent, - C.duration_300, - C.hover__bg_gray_100, - C.hover__text_blue_400, - C.ml_2, - C.px_4, - C.py_2, - C.rounded_full, - C.text_blue_500, - C.transition_colors, - ], - simple_ev(Ev::Click, modal::Msg::Close), - "Cancel", - ] - .map_msg(Msg::Modal), - ]) - .merge_attrs(class![C.pt_8]) - ] - ], - ) - .with_listener(keyboard_ev(Ev::KeyDown, move |ev| match ev.key_code() { - key_codes::ESC => Msg::Modal(modal::Msg::Close), - _ => Msg::Noop, - })), - ) -} diff --git a/iml-gui/crate/src/page/snapshot/create_policy.rs b/iml-gui/crate/src/page/snapshot/create_policy.rs new file mode 100644 index 0000000000..5741d696e3 --- /dev/null +++ b/iml-gui/crate/src/page/snapshot/create_policy.rs @@ -0,0 +1,471 @@ +// Copyright (c) 2020 DDN. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +use crate::{ + components::{font_awesome, form, modal, Placement}, + extensions::{MergeAttrs as _, NodeExt as _}, + generated::css_classes::C, + key_codes, + page::{ + snapshot::{get_fs_names, help_indicator}, + RecordChange, + }, + GMsg, RequestExt, +}; +use iml_graphql_queries::{snapshot, Response}; +use iml_wire_types::{snapshot::SnapshotPolicy, warp_drive::ArcCache, warp_drive::ArcRecord, warp_drive::RecordId}; +use seed::{prelude::*, *}; +use std::{collections::HashMap, sync::Arc}; + +#[derive(Debug, Default)] +pub struct Model { + pub modal: modal::Model, + submitting: bool, + filesystems: Vec, + policies: HashMap>, + interval: u64, + interval_unit: String, + start: String, + vars: snapshot::policy::create::Vars, +} + +impl Model { + fn refresh(&mut self, cache: &ArcCache) { + self.filesystems = cache.filesystem.iter().map(|(_, x)| x.name.clone()).collect(); + self.policies = cache + .snapshot_policy + .iter() + .map(|(_, x)| (x.filesystem.clone(), x.clone())) + .collect(); + } +} + +impl RecordChange for Model { + fn update_record(&mut self, _: ArcRecord, cache: &ArcCache, _orders: &mut impl Orders) { + self.refresh(cache); + } + fn remove_record(&mut self, _: RecordId, cache: &ArcCache, orders: &mut impl Orders) { + self.refresh(cache); + + let present = cache.filesystem.values().any(|x| x.name == self.vars.filesystem); + + if !present { + let x = get_fs_names(cache).into_iter().next().unwrap_or_default(); + orders.send_msg(Msg::Input(Input::Filesystem(x))); + } + } + fn set_records(&mut self, cache: &ArcCache, orders: &mut impl Orders) { + self.refresh(cache); + + let x = get_fs_names(cache).into_iter().next().unwrap_or_default(); + orders.send_msg(Msg::Input(Input::Filesystem(x))); + } +} + +#[derive(Clone, Debug)] +pub enum Msg { + Modal(modal::Msg), + Open, + Close, + Input(Input), + Submit, + CreatePolicyResp(fetch::ResponseDataResult>), + Noop, +} + +#[derive(Clone, Debug)] +pub enum Input { + Filesystem(String), + Interval(u64), + IntervalUnit(String), + Start(String), + ToggleBarrier, + Keep(i32), + Daily(Option), + Monthly(Option), + Weekly(Option), +} + +pub fn update(msg: 
Msg, model: &mut Model, orders: &mut impl Orders) { + match msg { + Msg::Input(x) => match x { + Input::Interval(i) => model.interval = i, + Input::IntervalUnit(i) => model.interval_unit = i, + Input::Start(i) => model.start = i, + + Input::Filesystem(i) => { + if let Some(p) = model.policies.get(&i) { + model.vars.keep = p.keep; + model.vars.daily = Some(p.daily); + model.vars.weekly = Some(p.weekly); + model.vars.monthly = Some(p.monthly); + model.vars.barrier = Some(p.barrier); + model.start = p.start.map(|t| t.format("%FT%R").to_string()).unwrap_or_default(); + model.interval = p.interval.0.as_secs() / 60; + model.interval_unit = "m".into(); + } else { + model.vars = Default::default(); + model.vars.keep = 32; + model.interval = 60; + model.interval_unit = "m".into(); + model.start = "".into(); + } + model.vars.filesystem = i; + } + Input::ToggleBarrier => model.vars.barrier = Some(!model.vars.barrier.unwrap_or_default()), + Input::Keep(i) => model.vars.keep = i, + Input::Daily(i) => model.vars.daily = i, + Input::Weekly(i) => model.vars.weekly = i, + Input::Monthly(i) => model.vars.monthly = i, + }, + Msg::Submit => { + model.submitting = true; + model.vars.interval = format!("{}{}", model.interval, model.interval_unit); + model.vars.start = if model.start.is_empty() { + None + } else { + Some(format!("{}:00Z", model.start)) + }; + + let query = snapshot::policy::create::build(model.vars.clone()); + + let req = fetch::Request::graphql_query(&query); + + orders.perform_cmd(req.fetch_json_data(|x| Msg::CreatePolicyResp(x))); + } + Msg::CreatePolicyResp(x) => { + model.submitting = false; + orders.send_msg(Msg::Close); + + match x { + Ok(Response::Data(_)) => {} + Ok(Response::Errors(e)) => { + error!("An error has occurred during policy creation: ", e); + } + Err(e) => { + error!("An error has occurred during policy creation: ", e); + } + } + } + Msg::Modal(msg) => { + modal::update(msg, &mut model.modal, &mut orders.proxy(Msg::Modal)); + } + Msg::Open => { + model.modal.open = true; + } + Msg::Close => { + model.modal.open = false; + } + Msg::Noop => {} + }; +} + +// FIXME: this function was created to help rustfmt only +fn interval_unit_options(selected: &str) -> Vec> { + vec![ + option![ + class![C.font_sans], + attrs! {At::Value => "minutes", At::Selected => (selected == "minutes").as_at_value()}, + "Minutes" + ], + option![ + class![C.font_sans], + attrs! {At::Value => "hours", At::Selected => (selected == "hours").as_at_value()}, + "Hours" + ], + option![ + class![C.font_sans], + attrs! {At::Value => "days", At::Selected => (selected == "days").as_at_value()}, + "Days" + ], + ] +} + +pub fn view(model: &Model) -> Node { + let input_cls = class![ + C.appearance_none, + C.focus__outline_none, + C.focus__shadow_outline, + C.px_3, + C.py_2, + C.rounded_sm + ]; + + modal::bg_view( + model.modal.open, + Msg::Modal, + modal::content_view( + Msg::Modal, + div![ + modal::title_view(Msg::Modal, span!["Create Automatic Snapshot Policy"]), + form![ + ev(Ev::Submit, move |event| { + event.prevent_default(); + Msg::Submit + }), + div![ + class![C.grid, C.grid_cols_2, C.gap_4, C.p_4, C.items_center], + label![attrs! {At::For => "policy_filesystem"}, "Filesystem Name"], + div![ + class![C.inline_block, C.relative, C.bg_gray_200], + select![ + id!["policy_filesystem"], + &input_cls, + class![ + C.block, + C.text_gray_800, + C.leading_tight, + C.bg_transparent, + C.pr_8, + C.rounded, + C.w_full + ], + model.filesystems.iter().map(|x| { + let mut opt = option![class![C.font_sans], attrs! 
{At::Value => x}, x]; + if x == model.vars.filesystem.as_str() { + opt.add_attr(At::Selected.to_string(), "selected"); + } + + opt + }), + attrs! { + At::Required => true.as_at_value(), + }, + input_ev(Ev::Change, |s| Msg::Input(Input::Filesystem(s))), + ], + div![ + class![ + C.pointer_events_none, + C.absolute, + C.inset_y_0, + C.right_0, + C.flex, + C.items_center, + C.px_2, + C.text_gray_700, + ], + font_awesome(class![C.w_4, C.h_4, C.inline, C.ml_1], "chevron-down") + ], + ], + label![ + attrs! {At::For => "policy_interval"}, + "Interval", + help_indicator( + "How often to take a snapshot for a selected filesystem", + Placement::Right + ) + ], + div![ + class![C.grid, C.grid_cols_6], + input![ + &input_cls, + class![C.bg_gray_200, C.text_gray_800, C.col_span_4, C.rounded_r_none], + id!["policy_interval"], + attrs! { + At::Type => "number", + At::Min => "1", + At::Placeholder => "Required", + At::Required => true.as_at_value(), + At::Value => model.interval + }, + input_ev(Ev::Input, |s| s + .parse() + .map(|i| Msg::Input(Input::Interval(i))) + .unwrap_or(Msg::Noop)), + ], + div![ + class![C.inline_block, C.relative, C.col_span_2, C.text_white, C.bg_blue_500], + select![ + id!["interval_unit"], + &input_cls, + class![C.w_full, C.h_full C.rounded_l_none, C.bg_transparent], + interval_unit_options(&model.interval_unit), + attrs! { + At::Required => true.as_at_value(), + }, + input_ev(Ev::Change, |s| Msg::Input(Input::IntervalUnit(s))), + ], + div![ + class![ + C.pointer_events_none, + C.absolute, + C.inset_y_0, + C.right_0, + C.flex, + C.items_center, + C.px_2, + C.text_white, + ], + font_awesome(class![C.w_4, C.h_4, C.inline, C.ml_1], "chevron-down") + ] + ], + ], + label![ + attrs! {At::For => "policy_start"}, + "Start at", + help_indicator("Date and time when to start creating snapshots", Placement::Right) + ], + input![ + &input_cls, + class![C.bg_gray_200, C.text_gray_800, C.rounded_r_none], + id!["policy_start"], + attrs! { + At::Type => "datetime-local", + At::Min => "1", + At::Placeholder => "e. g. 2020-12-25T20:00", + At::Pattern => "[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}", + At::Value => model.start + }, + input_ev(Ev::Input, |s| { Msg::Input(Input::Start(s)) }), + ], + label![ + attrs! {At::For => "policy_daily"}, + "Daily Snapshots", + help_indicator( + "Number of days when keep the most recent snapshot of each day", + Placement::Right + ) + ], + input![ + &input_cls, + class![C.bg_gray_200, C.text_gray_800, C.rounded_r_none], + id!["policy_daily"], + attrs! { + At::Type => "number", + At::Min => "0", + At::Max => "32", + At::Placeholder => "Optional", + At::Required => false.as_at_value(), + At::Value => model.vars.daily.map(|i| i.to_string()).unwrap_or_default() + }, + input_ev(Ev::Input, |s| { Msg::Input(Input::Daily(s.parse().ok())) }), + ], + label![ + attrs! {At::For => "policy_weekly"}, + "Weekly Snapshots", + help_indicator( + "Number of weeks when keep the most recent snapshot of each week", + Placement::Right + ) + ], + input![ + &input_cls, + class![C.bg_gray_200, C.text_gray_800, C.rounded_r_none], + id!["policy_weekly"], + attrs! { + At::Type => "number", + At::Min => "0", + At::Max => "32", + At::Placeholder => "Optional", + At::Required => false.as_at_value(), + At::Value => model.vars.weekly.map(|i| i.to_string()).unwrap_or_default() + }, + input_ev(Ev::Input, |s| Msg::Input(Input::Weekly(s.parse().ok()))), + ], + label![ + attrs! 
{At::For => "policy_monthly"}, + "Monthly Snapshots", + help_indicator( + "Number of months when keep the most recent snapshot of each months", + Placement::Right + ) + ], + input![ + &input_cls, + class![C.bg_gray_200, C.text_gray_800, C.rounded_r_none], + id!["policy_monthly"], + attrs! { + At::Type => "number", + At::Min => "0", + At::Max => "32", + At::Placeholder => "Optional", + At::Required => false.as_at_value(), + At::Value => model.vars.monthly.map(|i| i.to_string()).unwrap_or_default() + }, + input_ev(Ev::Input, |s| Msg::Input(Input::Monthly(s.parse().ok()))), + ], + label![ + attrs! {At::For => "policy_keep"}, + "Maximum number of snapshots", + help_indicator( + "Maximum number of all snapshots to keep, automatic or not", + Placement::Right + ) + ], + input![ + &input_cls, + class![C.bg_gray_200, C.text_gray_800, C.rounded_r_none], + id!["policy_keep"], + attrs! { + At::Type => "number", + At::Min => "2", + At::Max => "32", + At::Placeholder => "Required", + At::Required => true.as_at_value(), + At::Value => model.vars.keep + }, + input_ev(Ev::Input, |s| { + s.parse().map(|i| Msg::Input(Input::Keep(i))).unwrap_or(Msg::Noop) + }), + ], + label![ + attrs! {At::For => "policy_barrier"}, + "Use Barrier", + help_indicator("Set write barrier before creating snapshot", Placement::Right) + ], + form::toggle() + .merge_attrs(id!["policy_barrier"]) + .merge_attrs(attrs! { + At::Checked => model.vars.barrier.unwrap_or(false).as_at_value() + }) + .with_listener(input_ev(Ev::Change, |_| Msg::Input(Input::ToggleBarrier))), + ], + modal::footer_view(vec![ + button![ + class![ + C.bg_blue_500, + C.duration_300, + C.flex, + C.form_invalid__bg_gray_500, + C.form_invalid__cursor_not_allowed, + C.form_invalid__pointer_events_none, + C.hover__bg_blue_400, + C.items_center + C.px_4, + C.py_2, + C.rounded_full, + C.text_white, + C.transition_colors, + ], + font_awesome(class![C.h_3, C.w_3, C.mr_1, C.inline], "plus"), + "Create Policy", + ], + button![ + class![ + C.bg_transparent, + C.duration_300, + C.hover__bg_gray_100, + C.hover__text_blue_400, + C.ml_2, + C.px_4, + C.py_2, + C.rounded_full, + C.text_blue_500, + C.transition_colors, + ], + simple_ev(Ev::Click, modal::Msg::Close), + "Cancel", + ] + .map_msg(Msg::Modal), + ]) + .merge_attrs(class![C.pt_8]) + ] + ], + ) + .with_listener(keyboard_ev(Ev::KeyDown, move |ev| match ev.key_code() { + key_codes::ESC => Msg::Modal(modal::Msg::Close), + _ => Msg::Noop, + })), + ) +} diff --git a/iml-gui/crate/src/page/snapshot/create_retention.rs b/iml-gui/crate/src/page/snapshot/create_retention.rs deleted file mode 100644 index 4a4f8e3638..0000000000 --- a/iml-gui/crate/src/page/snapshot/create_retention.rs +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright (c) 2020 DDN. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
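A minimal sketch (not part of the patch) of how the create-policy form above turns its inputs into the variables for the `snapshot::policy::create` mutation: the numeric interval and its unit are concatenated into a single human-readable duration string, and the `datetime-local` value gets seconds and a UTC marker appended, mirroring the `Msg::Submit` handler; the exact duration format accepted by the server is an assumption here.

    fn serialize_policy_inputs(interval: u64, unit: &str, start: &str) -> (String, Option<String>) {
        // e.g. (30, "minutes") -> "30minutes"
        let interval = format!("{}{}", interval, unit);

        // e.g. "2020-12-25T20:00" -> "2020-12-25T20:00:00Z"; an empty field is
        // sent as None, letting the service schedule from the most recent
        // automatic snapshot instead of a fixed start time
        let start = if start.is_empty() {
            None
        } else {
            Some(format!("{}:00Z", start))
        };

        (interval, start)
    }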
- -use crate::{ - components::{font_awesome, modal, Placement}, - extensions::{MergeAttrs as _, NodeExt as _}, - generated::css_classes::C, - key_codes, - page::{ - snapshot::{get_fs_names, help_indicator}, - RecordChange, - }, - GMsg, RequestExt, -}; -use iml_graphql_queries::{snapshot, Response}; -use iml_wire_types::{ - snapshot::ReserveUnit, warp_drive::ArcCache, warp_drive::ArcRecord, warp_drive::RecordId, Filesystem, -}; -use seed::{prelude::*, *}; -use std::{str::FromStr, sync::Arc}; - -#[derive(Debug)] -pub struct Model { - submitting: bool, - filesystems: Vec>, - fs_name: String, - reserve_value: u32, - reserve_unit: ReserveUnit, - keep_num: Option, - pub modal: modal::Model, -} - -impl Default for Model { - fn default() -> Self { - Model { - submitting: false, - filesystems: vec![], - fs_name: "".into(), - reserve_value: 0, - reserve_unit: ReserveUnit::Percent, - keep_num: None, - modal: modal::Model::default(), - } - } -} - -impl RecordChange for Model { - fn update_record(&mut self, _: ArcRecord, cache: &ArcCache, orders: &mut impl Orders) { - orders.send_msg(Msg::SetFilesystems(cache.filesystem.values().cloned().collect())); - } - fn remove_record(&mut self, _: RecordId, cache: &ArcCache, orders: &mut impl Orders) { - orders.send_msg(Msg::SetFilesystems(cache.filesystem.values().cloned().collect())); - - let present = cache.filesystem.values().any(|x| x.name == self.fs_name); - - if !present { - let x = get_fs_names(cache).into_iter().next().unwrap_or_default(); - orders.send_msg(Msg::FsNameChanged(x)); - } - } - fn set_records(&mut self, cache: &ArcCache, orders: &mut impl Orders) { - orders.send_msg(Msg::SetFilesystems(cache.filesystem.values().cloned().collect())); - - let x = get_fs_names(cache).into_iter().next().unwrap_or_default(); - orders.send_msg(Msg::FsNameChanged(x)); - } -} - -#[derive(Clone, Debug)] -pub enum Msg { - Modal(modal::Msg), - Open, - Close, - SetFilesystems(Vec>), - KeepNumChanged(String), - FsNameChanged(String), - ReserveValueChanged(String), - ReserveUnitChanged(String), - Submit, - SnapshotCreateRetentionResp(fetch::ResponseDataResult>), - Noop, -} - -pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) { - match msg { - Msg::Modal(msg) => { - modal::update(msg, &mut model.modal, &mut orders.proxy(Msg::Modal)); - } - Msg::Open => { - model.modal.open = true; - } - Msg::Close => { - model.modal.open = false; - } - Msg::SetFilesystems(x) => { - model.filesystems = x; - } - Msg::FsNameChanged(x) => { - model.fs_name = x; - } - Msg::KeepNumChanged(x) => { - model.keep_num = x.parse().ok(); - } - Msg::ReserveValueChanged(x) => { - model.reserve_value = x.parse().unwrap(); - } - Msg::ReserveUnitChanged(x) => { - model.reserve_unit = ReserveUnit::from_str(&x).unwrap(); - } - Msg::Submit => { - model.submitting = true; - - let query = snapshot::create_retention::build( - &model.fs_name, - model.reserve_value, - model.reserve_unit, - model.keep_num, - ); - - let req = fetch::Request::graphql_query(&query); - - orders.perform_cmd(req.fetch_json_data(|x| Msg::SnapshotCreateRetentionResp(x))); - } - Msg::SnapshotCreateRetentionResp(x) => { - model.submitting = false; - orders.send_msg(Msg::Close); - - match x { - Ok(Response::Data(_)) => { - *model = Model { - fs_name: model.fs_name.to_string(), - ..Model::default() - }; - } - Ok(Response::Errors(e)) => { - error!("An error has occurred during policy creation: ", e); - } - Err(e) => { - error!("An error has occurred during policy creation: ", e); - } - } - } - Msg::Noop => {} - }; -} - -pub fn 
view(model: &Model) -> Node { - let input_cls = class![ - C.appearance_none, - C.focus__outline_none, - C.focus__shadow_outline, - C.px_3, - C.py_2, - C.rounded_sm - ]; - - modal::bg_view( - model.modal.open, - Msg::Modal, - modal::content_view( - Msg::Modal, - div![ - modal::title_view(Msg::Modal, span!["Create Snapshot Retention Policy"]), - form![ - ev(Ev::Submit, move |event| { - event.prevent_default(); - Msg::Submit - }), - div![ - class![C.grid, C.grid_cols_2, C.gap_4, C.p_4, C.items_center], - label![attrs! {At::For => "retention_fs_name"}, "Filesystem Name"], - div![ - class![C.inline_block, C.relative, C.bg_gray_200], - select![ - id!["retention_fs_name"], - &input_cls, - class![ - C.block, - C.text_gray_800, - C.leading_tight, - C.bg_transparent, - C.pr_8, - C.rounded, - C.w_full - ], - model.filesystems.iter().map(|x| { - let mut opt = option![class![C.font_sans], attrs! {At::Value => x.name}, x.name]; - if x.name == model.fs_name.as_str() { - opt.add_attr(At::Selected.to_string(), "selected"); - } - - opt - }), - attrs! { - At::Required => true.as_at_value(), - }, - input_ev(Ev::Change, Msg::FsNameChanged), - ], - div![ - class![ - C.pointer_events_none, - C.absolute, - C.inset_y_0, - C.right_0, - C.flex, - C.items_center, - C.px_2, - C.text_gray_700, - ], - font_awesome(class![C.w_4, C.h_4, C.inline, C.ml_1], "chevron-down") - ], - ], - label![ - attrs! {At::For => "reserve_value"}, - "Reserve", - help_indicator( - "Delete the oldest snapshot when available space falls below this value.", - Placement::Right - ) - ], - div![ - class![C.grid, C.grid_cols_6], - input![ - &input_cls, - class![C.bg_gray_200, C.text_gray_800, C.col_span_4, C.rounded_r_none], - id!["reserve_value"], - attrs! { - At::Type => "number", - At::Min => "0", - At::Placeholder => "Required", - At::Required => true.as_at_value(), - }, - input_ev(Ev::Change, Msg::ReserveValueChanged), - ], - div![ - class![C.inline_block, C.relative, C.col_span_2, C.text_white, C.bg_blue_500], - select![ - id!["reserve_unit"], - &input_cls, - class![C.w_full, C.h_full C.rounded_l_none, C.bg_transparent], - option![class![C.font_sans], attrs! {At::Value => "percent"}, "%"], - option![class![C.font_sans], attrs! {At::Value => "gibibytes"}, "GiB"], - option![class![C.font_sans], attrs! {At::Value => "tebibytes"}, "TiB"], - attrs! { - At::Required => true.as_at_value(), - }, - input_ev(Ev::Change, Msg::ReserveUnitChanged), - ], - div![ - class![ - C.pointer_events_none, - C.absolute, - C.inset_y_0, - C.right_0, - C.flex, - C.items_center, - C.px_2, - C.text_white, - ], - font_awesome(class![C.w_4, C.h_4, C.inline, C.ml_1], "chevron-down") - ] - ], - ], - label![ - attrs! {At::For => "keep_num"}, - "Minimum Snapshots", - help_indicator("Minimum number of snapshots to keep", Placement::Right) - ], - input![ - &input_cls, - class![C.bg_gray_200, C.text_gray_800, C.rounded_r_none], - id!["keep_num"], - attrs! 
{ - At::Type => "number", - At::Min => "0", - At::Placeholder => "Optional (default: 0)", - At::Required => false.as_at_value(), - }, - input_ev(Ev::Change, Msg::KeepNumChanged), - ], - ], - modal::footer_view(vec![ - button![ - class![ - C.bg_blue_500, - C.duration_300, - C.flex, - C.form_invalid__bg_gray_500, - C.form_invalid__cursor_not_allowed, - C.form_invalid__pointer_events_none, - C.hover__bg_blue_400, - C.items_center - C.px_4, - C.py_2, - C.rounded_full, - C.text_white, - C.transition_colors, - ], - font_awesome(class![C.h_3, C.w_3, C.mr_1, C.inline], "plus"), - "Create Policy", - ], - button![ - class![ - C.bg_transparent, - C.duration_300, - C.hover__bg_gray_100, - C.hover__text_blue_400, - C.ml_2, - C.px_4, - C.py_2, - C.rounded_full, - C.text_blue_500, - C.transition_colors, - ], - simple_ev(Ev::Click, modal::Msg::Close), - "Cancel", - ] - .map_msg(Msg::Modal), - ]) - .merge_attrs(class![C.pt_8]) - ] - ], - ) - .with_listener(keyboard_ev(Ev::KeyDown, move |ev| match ev.key_code() { - key_codes::ESC => Msg::Modal(modal::Msg::Close), - _ => Msg::Noop, - })), - ) -} diff --git a/iml-gui/crate/src/page/snapshot/list_interval.rs b/iml-gui/crate/src/page/snapshot/list_interval.rs deleted file mode 100644 index 2863781b4a..0000000000 --- a/iml-gui/crate/src/page/snapshot/list_interval.rs +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2020 DDN. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -use super::*; -use crate::{extensions::RequestExt, font_awesome}; -use chrono_humanize::{Accuracy, HumanTime, Tense}; -use iml_wire_types::snapshot::SnapshotInterval; -use std::time::Duration; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum SortField { - FilesystemName, - Interval, -} - -impl Default for SortField { - fn default() -> Self { - Self::FilesystemName - } -} - -#[derive(Clone, Debug)] -pub enum Msg { - Page(paging::Msg), - Sort, - Delete(Arc), - SnapshotDeleteIntervalResp(fetch::ResponseDataResult>), - SortBy(table::SortBy), -} - -#[derive(Default, Debug)] -pub struct Model { - pager: paging::Model, - rows: Vec>, - sort: (SortField, paging::Dir), - take: take::Model, -} - -pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) { - match msg { - Msg::SortBy(table::SortBy(x)) => { - let dir = if x == model.sort.0 { - model.sort.1.next() - } else { - paging::Dir::default() - }; - - model.sort = (x, dir); - - orders.send_msg(Msg::Sort); - } - Msg::Page(msg) => { - paging::update(msg, &mut model.pager, &mut orders.proxy(Msg::Page)); - } - Msg::Sort => { - let sort_fn = match model.sort { - (SortField::FilesystemName, paging::Dir::Asc) => { - Box::new(|a: &Arc, b: &Arc| { - natord::compare(&a.filesystem_name, &b.filesystem_name) - }) as Box, &Arc) -> Ordering> - } - (SortField::FilesystemName, paging::Dir::Desc) => { - Box::new(|a: &Arc, b: &Arc| { - natord::compare(&b.filesystem_name, &a.filesystem_name) - }) - } - (SortField::Interval, paging::Dir::Asc) => { - Box::new(|a: &Arc, b: &Arc| { - a.interval.0.partial_cmp(&b.interval.0).unwrap() - }) - } - (SortField::Interval, paging::Dir::Desc) => { - Box::new(|a: &Arc, b: &Arc| { - b.interval.0.partial_cmp(&a.interval.0).unwrap() - }) - } - }; - - model.rows.sort_by(sort_fn); - } - Msg::Delete(x) => { - if let Ok(true) = window().confirm_with_message("Are you sure you want to delete this interval?") { - let query = snapshot::remove_interval::build(x.id); - - let req = fetch::Request::graphql_query(&query); - - 
orders.perform_cmd(req.fetch_json_data(|x| Msg::SnapshotDeleteIntervalResp(x))); - } - } - Msg::SnapshotDeleteIntervalResp(x) => match x { - Ok(Response::Data(_)) => {} - Ok(Response::Errors(e)) => { - error!("An error has occurred during Snapshot deletion: ", e); - } - Err(e) => { - error!("An error has occurred during Snapshot deletion: ", e); - } - }, - }; -} - -impl RecordChange for Model { - fn update_record(&mut self, _: ArcRecord, cache: &ArcCache, orders: &mut impl Orders) { - self.rows = cache.snapshot_interval.values().cloned().collect(); - - orders.proxy(Msg::Page).send_msg(paging::Msg::SetTotal(self.rows.len())); - - orders.send_msg(Msg::Sort); - } - fn remove_record(&mut self, _: RecordId, cache: &ArcCache, orders: &mut impl Orders) { - self.rows = cache.snapshot_interval.values().cloned().collect(); - - orders.proxy(Msg::Page).send_msg(paging::Msg::SetTotal(self.rows.len())); - - orders.send_msg(Msg::Sort); - } - fn set_records(&mut self, cache: &ArcCache, orders: &mut impl Orders) { - self.rows = cache.snapshot_interval.values().cloned().collect(); - - orders.proxy(Msg::Page).send_msg(paging::Msg::SetTotal(self.rows.len())); - - orders.send_msg(Msg::Sort); - } -} - -pub fn view(model: &Model, cache: &ArcCache, session: Option<&Session>) -> Node { - panel::view( - h3![class![C.py_4, C.font_normal, C.text_lg], "Automated Snapshot Rules"], - div![ - table::wrapper_view(vec![ - table::thead_view(vec![ - table::sort_header("FS Name", SortField::FilesystemName, model.sort.0, model.sort.1) - .map_msg(Msg::SortBy), - table::sort_header("Interval", SortField::Interval, model.sort.0, model.sort.1) - .map_msg(Msg::SortBy), - table::th_view(plain!["Use Barrier"]), - table::th_view(plain!["Last Run"]), - restrict::view(session, GroupType::FilesystemAdministrators, th![]), - ]), - tbody![model.rows[model.pager.range()].iter().map(|x| { - tr![ - td![ - table::td_cls(), - class![C.text_center], - match get_fs_by_name(cache, &x.filesystem_name) { - Some(x) => { - div![resource_links::fs_link(&x)] - } - None => { - plain![x.filesystem_name.to_string()] - } - } - ], - table::td_center(plain![display_interval(x.interval.0)]), - table::td_center(plain![match x.use_barrier { - true => { - "yes" - } - false => { - "no" - } - }]), - table::td_center(plain![x - .last_run - .map(|x| x.format("%m/%d/%Y %H:%M:%S").to_string()) - .unwrap_or_else(|| "---".to_string())]), - td![ - class![C.flex, C.justify_center, C.p_4, C.px_3], - restrict::view( - session, - GroupType::FilesystemAdministrators, - button![ - class![ - C.bg_blue_500, - C.duration_300, - C.flex, - C.hover__bg_blue_400, - C.items_center, - C.px_6, - C.py_2, - C.rounded_sm, - C.text_white, - C.transition_colors, - ], - font_awesome(class![C.w_3, C.h_3, C.inline, C.mr_1], "trash"), - "Delete Rule", - simple_ev(Ev::Click, Msg::Delete(Arc::clone(&x))) - ] - ) - ] - ] - })] - ]) - .merge_attrs(class![C.my_6]), - div![ - class![C.flex, C.justify_end, C.py_1, C.pr_3], - paging::limit_selection_view(&model.pager).map_msg(Msg::Page), - paging::page_count_view(&model.pager), - paging::next_prev_view(&model.pager).map_msg(Msg::Page) - ] - ], - ) -} - -fn display_interval(x: Duration) -> String { - chrono::Duration::from_std(x) - .map(HumanTime::from) - .map(|x| x.to_text_en(Accuracy::Precise, Tense::Present)) - .unwrap_or("---".into()) -} diff --git a/iml-gui/crate/src/page/snapshot/list_retention.rs b/iml-gui/crate/src/page/snapshot/list_policy.rs similarity index 65% rename from iml-gui/crate/src/page/snapshot/list_retention.rs rename to 
iml-gui/crate/src/page/snapshot/list_policy.rs index 597b01ec3f..47eb994c44 100644 --- a/iml-gui/crate/src/page/snapshot/list_retention.rs +++ b/iml-gui/crate/src/page/snapshot/list_policy.rs @@ -4,19 +4,20 @@ use super::*; use crate::{extensions::RequestExt, font_awesome}; -use iml_wire_types::snapshot::{ReserveUnit, SnapshotRetention}; +use chrono_humanize::{Accuracy, HumanTime, Tense}; +use iml_wire_types::snapshot::SnapshotPolicy; #[derive(Clone, Debug)] pub enum Msg { Page(paging::Msg), - Delete(Arc), - DeleteRetentionResp(fetch::ResponseDataResult>), + Delete(Arc), + DeleteResp(fetch::ResponseDataResult>), } #[derive(Default, Debug)] pub struct Model { pager: paging::Model, - rows: Vec>, + rows: Vec>, take: take::Model, } @@ -26,21 +27,22 @@ pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) paging::update(msg, &mut model.pager, &mut orders.proxy(Msg::Page)); } Msg::Delete(x) => { - if let Ok(true) = window().confirm_with_message("Are you sure you want to delete this retention policy?") { - let query = snapshot::remove_retention::build(x.id); + if let Ok(true) = window().confirm_with_message(&format!("Delete snapshot policy for '{}' ?", x.filesystem)) + { + let query = snapshot::policy::remove::build(&x.filesystem); let req = fetch::Request::graphql_query(&query); - orders.perform_cmd(req.fetch_json_data(|x| Msg::DeleteRetentionResp(x))); + orders.perform_cmd(req.fetch_json_data(Msg::DeleteResp)); } } - Msg::DeleteRetentionResp(x) => match x { + Msg::DeleteResp(x) => match x { Ok(Response::Data(_)) => {} Ok(Response::Errors(e)) => { - error!("An error has occurred during Snapshot deletion: ", e); + error!("An error has occurred during deletion: ", e); } Err(e) => { - error!("An error has occurred during Snapshot deletion: ", e); + error!("An error has occurred during deletion: ", e); } }, }; @@ -48,17 +50,17 @@ pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) impl RecordChange for Model { fn update_record(&mut self, _: ArcRecord, cache: &ArcCache, orders: &mut impl Orders) { - self.rows = cache.snapshot_retention.values().cloned().collect(); + self.rows = cache.snapshot_policy.values().cloned().collect(); orders.proxy(Msg::Page).send_msg(paging::Msg::SetTotal(self.rows.len())); } fn remove_record(&mut self, _: RecordId, cache: &ArcCache, orders: &mut impl Orders) { - self.rows = cache.snapshot_retention.values().cloned().collect(); + self.rows = cache.snapshot_policy.values().cloned().collect(); orders.proxy(Msg::Page).send_msg(paging::Msg::SetTotal(self.rows.len())); } fn set_records(&mut self, cache: &ArcCache, orders: &mut impl Orders) { - self.rows = cache.snapshot_retention.values().cloned().collect(); + self.rows = cache.snapshot_policy.values().cloned().collect(); orders.proxy(Msg::Page).send_msg(paging::Msg::SetTotal(self.rows.len())); } @@ -66,13 +68,18 @@ impl RecordChange for Model { pub fn view(model: &Model, cache: &ArcCache, session: Option<&Session>) -> Node { panel::view( - h3![class![C.py_4, C.font_normal, C.text_lg], "Snapshot Retention Policies"], + h3![class![C.py_4, C.font_normal, C.text_lg], "Automatic Snapshot Policies"], div![ table::wrapper_view(vec![ table::thead_view(vec![ table::th_view(plain!["Filesystem"]), - table::th_view(plain!["Reserve"]), + table::th_view(plain!["Interval"]), + table::th_view(plain!["Start"]), table::th_view(plain!["Keep"]), + table::th_view(plain!["Daily"]), + table::th_view(plain!["Weekly"]), + table::th_view(plain!["Monthly"]), + table::th_view(plain!["Barrier"]), table::th_view(plain!["Last 
Run"]), restrict::view(session, GroupType::FilesystemAdministrators, th![]), ]), @@ -81,25 +88,28 @@ pub fn view(model: &Model, cache: &ArcCache, session: Option<&Session>) -> Node< td![ table::td_cls(), class![C.text_center], - match get_fs_by_name(cache, &x.filesystem_name) { + match get_fs_by_name(cache, &x.filesystem) { Some(x) => { div![resource_links::fs_link(&x)] } None => { - plain![x.filesystem_name.to_string()] + plain![x.filesystem.to_string()] } } ], - table::td_center(plain![format!( - "{} {}", - x.reserve_value, - match x.reserve_unit { - ReserveUnit::Percent => "%", - ReserveUnit::Gibibytes => "GiB", - ReserveUnit::Tebibytes => "TiB", - } - )]), - table::td_center(plain![x.keep_num.to_string()]), + table::td_center(plain![chrono::Duration::from_std(x.interval.0) + .map(HumanTime::from) + .map(|x| x.to_text_en(Accuracy::Precise, Tense::Present)) + .unwrap_or("---".into())]), + table::td_center(plain![x + .start + .map(|x| x.format("%m/%d/%Y %H:%M:%S").to_string()) + .unwrap_or_else(|| "---".to_string())]), + table::td_center(plain![x.keep.to_string()]), + table::td_center(plain![x.daily.to_string()]), + table::td_center(plain![x.weekly.to_string()]), + table::td_center(plain![x.monthly.to_string()]), + table::td_center(plain![x.barrier.to_string()]), table::td_center(plain![x .last_run .map(|x| x.format("%m/%d/%Y %H:%M:%S").to_string()) diff --git a/iml-gui/crate/src/page/snapshot/mod.rs b/iml-gui/crate/src/page/snapshot/mod.rs index 9b419bd509..8d74f7662e 100644 --- a/iml-gui/crate/src/page/snapshot/mod.rs +++ b/iml-gui/crate/src/page/snapshot/mod.rs @@ -2,11 +2,9 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. -mod add_interval; -mod create_retention; +mod create_policy; mod list; -mod list_interval; -mod list_retention; +mod list_policy; mod take; use crate::{ @@ -31,11 +29,9 @@ use std::{cmp::Ordering, ops::Deref, sync::Arc}; #[derive(Default, Debug)] pub struct Model { take: take::Model, - list_interval: list_interval::Model, - list_retention: list_retention::Model, list: list::Model, - add_interval: add_interval::Model, - create_retention: create_retention::Model, + create_policy: create_policy::Model, + list_policy: list_policy::Model, } impl RecordChange for Model { @@ -43,64 +39,41 @@ impl RecordChange for Model { self.list .update_record(record.clone(), cache, &mut orders.proxy(Msg::List)); - self.list_interval - .update_record(record.clone(), cache, &mut orders.proxy(Msg::ListInterval)); + self.take + .update_record(record.clone(), cache, &mut orders.proxy(Msg::Take)); - self.list_retention - .update_record(record.clone(), cache, &mut orders.proxy(Msg::ListRetention)); - - self.add_interval - .update_record(record.clone(), cache, &mut orders.proxy(Msg::AddInterval)); - - self.create_retention - .update_record(record.clone(), cache, &mut orders.proxy(Msg::CreatRetention)); - - self.take.update_record(record, cache, &mut orders.proxy(Msg::Take)); + self.list_policy + .update_record(record.clone(), cache, &mut orders.proxy(Msg::ListPolicy)); + self.create_policy + .update_record(record, cache, &mut orders.proxy(Msg::CreatePolicy)); } fn remove_record(&mut self, record: RecordId, cache: &ArcCache, orders: &mut impl Orders) { self.list.remove_record(record, cache, &mut orders.proxy(Msg::List)); - self.list_interval - .remove_record(record, cache, &mut orders.proxy(Msg::ListInterval)); - - self.list_retention - .remove_record(record, cache, &mut orders.proxy(Msg::ListRetention)); - - self.add_interval - 
.remove_record(record, cache, &mut orders.proxy(Msg::AddInterval)); - - self.create_retention - .remove_record(record, cache, &mut orders.proxy(Msg::CreatRetention)); - self.take.remove_record(record, cache, &mut orders.proxy(Msg::Take)); + + self.list_policy + .remove_record(record, cache, &mut orders.proxy(Msg::ListPolicy)); + self.create_policy + .remove_record(record, cache, &mut orders.proxy(Msg::CreatePolicy)); } fn set_records(&mut self, cache: &ArcCache, orders: &mut impl Orders) { self.list.set_records(cache, &mut orders.proxy(Msg::List)); - self.list_interval - .set_records(cache, &mut orders.proxy(Msg::ListInterval)); - - self.list_retention - .set_records(cache, &mut orders.proxy(Msg::ListRetention)); - - self.add_interval - .set_records(cache, &mut orders.proxy(Msg::AddInterval)); - - self.create_retention - .set_records(cache, &mut orders.proxy(Msg::CreatRetention)); - self.take.set_records(cache, &mut orders.proxy(Msg::Take)); + + self.list_policy.set_records(cache, &mut orders.proxy(Msg::ListPolicy)); + self.create_policy + .set_records(cache, &mut orders.proxy(Msg::CreatePolicy)); } } #[derive(Clone, Debug)] pub enum Msg { Take(take::Msg), - ListInterval(list_interval::Msg), - ListRetention(list_retention::Msg), List(list::Msg), - AddInterval(add_interval::Msg), - CreatRetention(create_retention::Msg), + CreatePolicy(create_policy::Msg), + ListPolicy(list_policy::Msg), } pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) { @@ -108,20 +81,14 @@ pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) Msg::Take(msg) => { take::update(msg, &mut model.take, &mut orders.proxy(Msg::Take)); } - Msg::ListInterval(msg) => { - list_interval::update(msg, &mut model.list_interval, &mut orders.proxy(Msg::ListInterval)); - } - Msg::ListRetention(msg) => { - list_retention::update(msg, &mut model.list_retention, &mut orders.proxy(Msg::ListRetention)); - } Msg::List(msg) => { list::update(msg, &mut model.list, &mut orders.proxy(Msg::List)); } - Msg::AddInterval(msg) => { - add_interval::update(msg, &mut model.add_interval, &mut orders.proxy(Msg::AddInterval)); + Msg::CreatePolicy(msg) => { + create_policy::update(msg, &mut model.create_policy, &mut orders.proxy(Msg::CreatePolicy)); } - Msg::CreatRetention(msg) => { - create_retention::update(msg, &mut model.create_retention, &mut orders.proxy(Msg::CreatRetention)); + Msg::ListPolicy(msg) => { + list_policy::update(msg, &mut model.list_policy, &mut orders.proxy(Msg::ListPolicy)); } } } @@ -142,60 +109,20 @@ pub fn view(model: &Model, cache: &ArcCache, session: Option<&Session>) -> impl div![ take::view(&model.take).map_msg(Msg::Take).merge_attrs(class![C.my_6]), - if cache.snapshot_interval.is_empty() { - vec![add_interval_btn(false, session)] - } else { - vec![ - list_interval::view(&model.list_interval, cache, session) - .map_msg(Msg::ListInterval) - .merge_attrs(class![C.my_6]), - add_interval_btn(true, session), - ] - }, - add_interval::view(&model.add_interval).map_msg(Msg::AddInterval), - if cache.snapshot_retention.is_empty() { + if cache.snapshot_policy.is_empty() { empty![] } else { - list_retention::view(&model.list_retention, cache, session) - .map_msg(Msg::ListRetention) + list_policy::view(&model.list_policy, cache, session) + .map_msg(Msg::ListPolicy) .merge_attrs(class![C.my_6]) }, - create_retention_btn(session), - create_retention::view(&model.create_retention).map_msg(Msg::CreatRetention), + create_policy_btn(session), + create_policy::view(&model.create_policy).map_msg(Msg::CreatePolicy), 
list::view(&model.list, cache).map_msg(Msg::List) ] } -fn add_interval_btn(has_intervals: bool, session: Option<&Session>) -> Node { - restrict::view( - session, - GroupType::FilesystemAdministrators, - button![ - class![ - C.bg_blue_500, - C.duration_300, - C.flex, - C.hover__bg_blue_400, - C.items_center, - C.mb_6, - C.px_6, - C.py_2, - C.rounded_sm, - C.text_white, - C.transition_colors - ], - font_awesome(class![C.h_3, C.w_3, C.mr_1, C.inline], "plus"), - if has_intervals { - "Add Another Automated Snapshot Rule" - } else { - "Add Automated Snapshot Rule" - }, - simple_ev(Ev::Click, add_interval::Msg::Open).map_msg(Msg::AddInterval) - ], - ) -} - -fn create_retention_btn(session: Option<&Session>) -> Node { +fn create_policy_btn(session: Option<&Session>) -> Node { restrict::view( session, GroupType::FilesystemAdministrators, @@ -214,8 +141,8 @@ fn create_retention_btn(session: Option<&Session>) -> Node { C.transition_colors ], font_awesome(class![C.h_3, C.w_3, C.mr_1, C.inline], "plus"), - "Create Snapshot Retention Policy", - simple_ev(Ev::Click, create_retention::Msg::Open).map_msg(Msg::CreatRetention) + "Create Automatic Snapshot Policy", + simple_ev(Ev::Click, create_policy::Msg::Open).map_msg(Msg::CreatePolicy) ], ) } diff --git a/iml-gui/crate/src/page/snapshot/take.rs b/iml-gui/crate/src/page/snapshot/take.rs index 1d9eb3b7b7..56a5f8e716 100644 --- a/iml-gui/crate/src/page/snapshot/take.rs +++ b/iml-gui/crate/src/page/snapshot/take.rs @@ -62,7 +62,7 @@ pub fn update(msg: Msg, model: &mut Model, orders: &mut impl Orders) } Msg::SnapshotCreateResp(x) => match x { Ok(Response::Data(x)) => { - let x = command_modal::Input::Commands(vec![Arc::new(x.data.create_snapshot)]); + let x = command_modal::Input::Commands(vec![Arc::new(x.data.snapshot.create)]); orders.send_g_msg(GMsg::OpenCommandModal(x)); diff --git a/iml-gui/crate/src/test_utils/fixture.json b/iml-gui/crate/src/test_utils/fixture.json index 38e193be67..5ed8ca4b91 100644 --- a/iml-gui/crate/src/test_utils/fixture.json +++ b/iml-gui/crate/src/test_utils/fixture.json @@ -1221,8 +1221,7 @@ "sfa_storage_system": {}, "sfa_controller": {}, "snapshot": {}, - "snapshot_interval": {}, - "snapshot_retention": {}, + "snapshot_policy": {}, "stratagem_config": {}, "target": { "19": { diff --git a/iml-manager-cli/src/display_utils.rs b/iml-manager-cli/src/display_utils.rs index 430cf6f186..6880faeae0 100644 --- a/iml-manager-cli/src/display_utils.rs +++ b/iml-manager-cli/src/display_utils.rs @@ -2,13 +2,12 @@ // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
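A hedged sketch of the response shape the updated call sites assume: snapshot queries and mutations are now grouped under a `snapshot` field, so the deserialized data sits one level deeper than the old top-level `create_snapshot`/`unmount_snapshot` fields. The type and field names below are illustrative; the real definitions live in `iml-graphql-queries/src/snapshot.rs`.

    // Illustrative only; not the actual types from iml-graphql-queries.
    #[derive(serde::Deserialize)]
    struct Resp {
        snapshot: SnapshotData,
    }

    #[derive(serde::Deserialize)]
    struct SnapshotData {
        create: iml_wire_types::Command,
    }

    // old access path: resp.data.create_snapshot
    // new access path: resp.data.snapshot.create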
-use chrono_humanize::{Accuracy, HumanTime, Tense}; use console::style; use futures::{Future, FutureExt}; use iml_wire_types::{ db::TargetRecord, graphql::ServerProfile, - snapshot::{ReserveUnit, Snapshot, SnapshotInterval, SnapshotRetention}, + snapshot::{Snapshot, SnapshotPolicy}, Command, Filesystem, Host, OstPool, StratagemConfiguration, StratagemReport, }; use indicatif::ProgressBar; @@ -138,47 +137,33 @@ impl IntoTable for Vec { } } -impl IntoTable for Vec { +impl IntoTable for Vec { fn into_table(self) -> Table { generate_table( - &["Id", "Filesystem", "Interval", "Use Barrier", "Last Run"], - self.into_iter().map(|i| { + &[ + "Filesystem", + "Interval", + "Start", + "Keep", + "Daily", + "Weekly", + "Monthly", + "Barrier", + "Last Run", + ], + self.into_iter().map(|p| { vec![ - i.id.to_string(), - i.filesystem_name, - chrono::Duration::from_std(i.interval.0) - .map(HumanTime::from) - .map(|x| x.to_text_en(Accuracy::Precise, Tense::Present)) - .unwrap_or_else(|_| "---".to_string()), - i.use_barrier.to_string(), - i.last_run + p.filesystem, + p.interval.to_string(), + p.start .map(|t| t.to_rfc2822()) .unwrap_or_else(|| "---".to_string()), - ] - }), - ) - } -} - -impl IntoTable for Vec { - fn into_table(self) -> Table { - generate_table( - &["Id", "Filesystem", "Reserve", "Keep", "Last Run"], - self.into_iter().map(|r| { - vec![ - r.id.to_string(), - r.filesystem_name, - format!( - "{} {}", - r.reserve_value, - match r.reserve_unit { - ReserveUnit::Percent => "%", - ReserveUnit::Gibibytes => "GiB", - ReserveUnit::Tebibytes => "TiB", - } - ), - r.keep_num.to_string(), - r.last_run + p.keep.to_string(), + p.daily.to_string(), + p.weekly.to_string(), + p.monthly.to_string(), + p.barrier.to_string(), + p.last_run .map(|t| t.to_rfc2822()) .unwrap_or_else(|| "---".to_string()), ] diff --git a/iml-manager-cli/src/snapshot.rs b/iml-manager-cli/src/snapshot.rs index 590d8008c1..52fb6976a5 100644 --- a/iml-manager-cli/src/snapshot.rs +++ b/iml-manager-cli/src/snapshot.rs @@ -14,57 +14,53 @@ use iml_wire_types::snapshot; use structopt::StructOpt; #[derive(Debug, StructOpt)] -pub enum IntervalCommand { - /// List snapshots intervals +pub enum PolicyCommand { + /// List snapshot policies (default) List { /// Display type: json, yaml, tabular #[structopt(short = "d", long = "display", default_value = "tabular")] display_type: DisplayType, }, - /// Add new snapshot interval - Add { + /// Create or update snapshot policy + Create { + /// Filesystem to create a snapshot policy for + filesystem: String, + /// Automatic snapshot interval in human form, e. g. 1hour + #[structopt()] + interval: String, + /// Time to start making automatic snapshots, e. g. 2020-12-25T20:00:00Z + #[structopt(short = "s", long = "start")] + start: Option, /// Use barrier when creating snapshots #[structopt(short = "b", long = "barrier")] barrier: bool, - /// Filesystem to add a snapshot interval for - filesystem: String, - /// Snapshot interval in human form, e. g. 
1hour - #[structopt(required = true, min_values = 1)] - interval: Vec, + /// Maximum number of snapshots + #[structopt(short = "k", long = "keep")] + keep: i32, + /// The number of days when keep the most recent snapshot of each day + #[structopt(short = "d", long = "daily")] + daily: Option, + /// The number of weeks when keep the most recent snapshot of each week + #[structopt(short = "w", long = "weekly")] + weekly: Option, + /// The number of months when keep the most recent snapshot of each month + #[structopt(short = "m", long = "monthly")] + monthly: Option, }, - /// Remove snapshot intervals + /// Remove snapshot policies Remove { - /// The ids of the snapshot intervals to remove + /// Filesystem names to remove policies for #[structopt(required = true, min_values = 1)] - ids: Vec, + filesystem: Vec, }, } -#[derive(Debug, StructOpt)] -pub enum RetentionCommand { - /// List snapshots retention rules - List { - /// Display type: json, yaml, tabular - #[structopt(short = "d", long = "display", default_value = "tabular")] - display_type: DisplayType, - }, - /// Create snapshot retention rule - Create { - /// Filesystem to create a snapshot retention rule for - filesystem: String, - /// Delete the oldest snapshot when available space falls below this value - reserve_value: u32, - /// The unit of measurement associated with the reserve_value (%, GiB or TiB) - reserve_unit: snapshot::ReserveUnit, - /// Minimum number of snapshots to keep (default: 0) - keep_num: Option, - }, - /// Remove snapshot retention rule - Remove { - /// The ids of the retention rules to remove - #[structopt(required = true, min_values = 1)] - ids: Vec, - }, +impl Default for PolicyCommand { + fn default() -> Self { + PolicyCommand::List { + display_type: DisplayType::Tabular, + } + } } #[derive(Debug, StructOpt)] @@ -85,99 +81,66 @@ pub enum SnapshotCommand { /// The filesystem to list snapshots for fsname: String, }, - /// Snapshot intervals operations - Interval(IntervalCommand), - /// Snapshot retention rules operations - Retention(RetentionCommand), + /// Automatic snapshot policies operations + Policy { + #[structopt(subcommand)] + command: Option, + }, } -async fn interval_cli(cmd: IntervalCommand) -> Result<(), ImlManagerCliError> { +async fn policy_cli(cmd: PolicyCommand) -> Result<(), ImlManagerCliError> { match cmd { - IntervalCommand::List { display_type } => { - let query = snapshot_queries::list_intervals::build(); + PolicyCommand::List { display_type } => { + let query = snapshot_queries::policy::list::build(); - let resp: iml_graphql_queries::Response = + let resp: iml_graphql_queries::Response = graphql(query).await?; - let intervals = Result::from(resp)?.data.snapshot_intervals; + let policies = Result::from(resp)?.data.snapshot.policies; - let x = intervals.into_display_type(display_type); + let x = policies.into_display_type(display_type); let term = Term::stdout(); term.write_line(&x).unwrap(); Ok(()) } - IntervalCommand::Add { + PolicyCommand::Create { filesystem, interval, + start, barrier, + keep, + daily, + weekly, + monthly, } => { - let query = snapshot_queries::create_interval::build( - filesystem, - interval.join(" "), - Some(barrier), - ); - - let _resp: iml_graphql_queries::Response = + let query = + snapshot_queries::policy::create::build(snapshot_queries::policy::create::Vars { + filesystem, + interval, + start, + barrier: Some(barrier), + keep, + daily, + weekly, + monthly, + }); + + let _resp: iml_graphql_queries::Response = graphql(query).await?; Ok(()) } - 
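Example invocations of the new subcommands, assuming the manager CLI is called as `iml snapshot ...` (subcommand and flag names are taken from the `PolicyCommand` definition above; the binary name itself is an assumption):

    # list policies (the default when no subcommand is given)
    iml snapshot policy
    # create or update the policy for fs0: hourly snapshots, at most 32 kept,
    # with 7 daily, 4 weekly and 3 monthly snapshots retained
    iml snapshot policy create fs0 1hour --keep 32 --daily 7 --weekly 4 --monthly 3 --barrier
    # remove the policy for fs0
    iml snapshot policy remove fs0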
IntervalCommand::Remove { ids } => { - for id in ids { - let query = snapshot_queries::remove_interval::build(id); + PolicyCommand::Remove { filesystem } => { + for fs in filesystem { + let query = snapshot_queries::policy::remove::build(fs); - let _resp: iml_graphql_queries::Response = + let _resp: iml_graphql_queries::Response = graphql(query).await?; } - Ok(()) - } - } -} - -async fn retention_cli(cmd: RetentionCommand) -> Result<(), ImlManagerCliError> { - match cmd { - RetentionCommand::List { display_type } => { - let query = snapshot_queries::list_retentions::build(); - - let resp: iml_graphql_queries::Response = - graphql(query).await?; - let retentions = Result::from(resp)?.data.snapshot_retention_policies; - - let x = retentions.into_display_type(display_type); - - let term = Term::stdout(); - term.write_line(&x).unwrap(); Ok(()) } - RetentionCommand::Create { - filesystem, - keep_num, - reserve_value, - reserve_unit, - } => { - let query = snapshot_queries::create_retention::build( - filesystem, - reserve_value, - reserve_unit, - keep_num, - ); - - let _resp: iml_graphql_queries::Response = - graphql(query).await?; - - Ok(()) - } - RetentionCommand::Remove { ids } => { - for id in ids { - let query = snapshot_queries::remove_retention::build(id); - - let _resp: iml_graphql_queries::Response = - graphql(query).await?; - } - Ok(()) - } } } @@ -190,7 +153,7 @@ pub async fn snapshot_cli(command: SnapshotCommand) -> Result<(), ImlManagerCliE let query = snapshot_queries::list::build(fsname, None, None, None, Some(1_000)); let resp: iml_graphql_queries::Response = graphql(query).await?; - let snaps = Result::from(resp)?.data.snapshots; + let snaps = Result::from(resp)?.data.snapshot.snapshots; let x = snaps.into_display_type(display_type); @@ -205,7 +168,7 @@ pub async fn snapshot_cli(command: SnapshotCommand) -> Result<(), ImlManagerCliE let resp: iml_graphql_queries::Response = graphql(query).await?; - let x = Result::from(resp)?.data.create_snapshot; + let x = Result::from(resp)?.data.snapshot.create; wait_for_cmds_success(&[x]).await?; Ok(()) @@ -215,7 +178,7 @@ pub async fn snapshot_cli(command: SnapshotCommand) -> Result<(), ImlManagerCliE let resp: iml_graphql_queries::Response = graphql(query).await?; - let x = Result::from(resp)?.data.destroy_snapshot; + let x = Result::from(resp)?.data.snapshot.destroy; wait_for_cmds_success(&[x]).await?; Ok(()) @@ -224,7 +187,7 @@ pub async fn snapshot_cli(command: SnapshotCommand) -> Result<(), ImlManagerCliE let query = snapshot_queries::mount::build(x.fsname, x.name); let resp: iml_graphql_queries::Response = graphql(query).await?; - let x = Result::from(resp)?.data.mount_snapshot; + let x = Result::from(resp)?.data.snapshot.mount; wait_for_cmds_success(&[x]).await?; Ok(()) @@ -233,12 +196,11 @@ pub async fn snapshot_cli(command: SnapshotCommand) -> Result<(), ImlManagerCliE let query = snapshot_queries::unmount::build(x.fsname, x.name); let resp: iml_graphql_queries::Response = graphql(query).await?; - let x = Result::from(resp)?.data.unmount_snapshot; + let x = Result::from(resp)?.data.snapshot.unmount; wait_for_cmds_success(&[x]).await?; Ok(()) } - SnapshotCommand::Interval(cmd) => interval_cli(cmd).await, - SnapshotCommand::Retention(cmd) => retention_cli(cmd).await, + SnapshotCommand::Policy { command } => policy_cli(command.unwrap_or_default()).await, } } diff --git a/iml-services/iml-snapshot/Cargo.toml b/iml-services/iml-snapshot/Cargo.toml index 3bc9043c6b..636360c9ed 100644 --- a/iml-services/iml-snapshot/Cargo.toml +++ 
b/iml-services/iml-snapshot/Cargo.toml @@ -5,18 +5,19 @@ name = "iml-snapshot" version = "0.4.0" [dependencies] +chrono = "0.4" futures = "0.3" futures-util = "0.3" iml-command-utils = {path = "../../iml-command-utils", version = "0.4"} iml-graphql-queries = {path = "../../iml-graphql-queries", version = "0.2"} -iml-influx = {path = "../../iml-influx", version = "0.2"} +iml-influx = {path = "../../iml-influx", version = "0.2", features = ["with-db-client"]} iml-manager-client = {path = "../../iml-manager-client", version = "0.4"} iml-manager-env = {path = "../../iml-manager-env", version = "0.4"} iml-postgres = {path = "../../iml-postgres", version = "0.4"} iml-rabbit = {path = "../../iml-rabbit", version = "0.4"} iml-service-queue = {path = "../iml-service-queue", version = "0.4"} iml-tracing = {version = "0.3", path = "../../iml-tracing"} -iml-wire-types = {path = "../../iml-wire-types", version = "0.4"} +iml-wire-types = {path = "../../iml-wire-types", version = "0.4", features = ["postgres-interop"]} serde = "1.0" thiserror = "1.0" tokio = {version = "0.2", features = ["rt-threaded"]} diff --git a/iml-services/iml-snapshot/src/client_monitor.rs b/iml-services/iml-snapshot/src/client_monitor.rs index 1b44094216..be24216c1a 100644 --- a/iml-services/iml-snapshot/src/client_monitor.rs +++ b/iml-services/iml-snapshot/src/client_monitor.rs @@ -74,7 +74,7 @@ pub async fn tick( ); let resp: iml_graphql_queries::Response = graphql(client_2.clone(), query).await?; - let command = Result::from(resp)?.data.unmount_snapshot; + let command = Result::from(resp)?.data.snapshot.unmount; wait_for_cmds_success(&[command], None).await?; } } diff --git a/iml-services/iml-snapshot/src/lib.rs b/iml-services/iml-snapshot/src/lib.rs index cbaf33d6b5..2e3a4f9845 100644 --- a/iml-services/iml-snapshot/src/lib.rs +++ b/iml-services/iml-snapshot/src/lib.rs @@ -3,7 +3,7 @@ use iml_manager_client::ImlManagerClientError; use tokio::time::Instant; pub mod client_monitor; -pub mod retention; +pub mod policy; #[derive(thiserror::Error, Debug)] pub enum Error { diff --git a/iml-services/iml-snapshot/src/main.rs b/iml-services/iml-snapshot/src/main.rs index 96a0d709ec..4714f4682c 100644 --- a/iml-services/iml-snapshot/src/main.rs +++ b/iml-services/iml-snapshot/src/main.rs @@ -3,12 +3,11 @@ // license that can be found in the LICENSE file. 
use futures::{StreamExt, TryStreamExt}; -use iml_influx::Client as InfluxClient; -use iml_manager_client::{Client as ManagerClient, Url}; -use iml_manager_env::{get_influxdb_addr, get_influxdb_metrics_db, get_pool_limit}; +use iml_manager_client::Client as ManagerClient; +use iml_manager_env::get_pool_limit; use iml_postgres::{get_db_pool, sqlx}; use iml_service_queue::service_queue::consume_data; -use iml_snapshot::{client_monitor::tick, retention::handle_retention_rules, MonitorState}; +use iml_snapshot::{client_monitor::tick, policy, MonitorState}; use iml_tracing::tracing; use iml_wire_types::snapshot; use std::collections::HashMap; @@ -30,19 +29,12 @@ async fn main() -> Result<(), Box> { consume_data::>>(&ch, "rust_agent_snapshot_rx"); let pool = get_db_pool(get_pool_limit().unwrap_or(DEFAULT_POOL_LIMIT)).await?; - let pool_2 = pool.clone(); - let pool_3 = pool.clone(); let manager_client: ManagerClient = iml_manager_client::get_client()?; - let influx_url: String = format!("http://{}", get_influxdb_addr()); - let influx_client = InfluxClient::new( - Url::parse(&influx_url).expect("Influx URL is invalid."), - get_influxdb_metrics_db(), - ); - sqlx::migrate!("../../migrations").run(&pool).await?; + let pool_2 = pool.clone(); tokio::spawn(async move { let mut interval = interval(Duration::from_secs(60)); let mut snapshot_client_counts: HashMap = HashMap::new(); @@ -55,11 +47,7 @@ async fn main() -> Result<(), Box> { } }); - tokio::spawn(handle_retention_rules( - manager_client, - influx_client, - pool_3.clone(), - )); + tokio::spawn(policy::main(manager_client, pool.clone())); while let Some((fqdn, snap_map)) = s.try_next().await? { for (fs_name, snapshots) in snap_map { diff --git a/iml-services/iml-snapshot/src/policy.rs b/iml-services/iml-snapshot/src/policy.rs new file mode 100644 index 0000000000..ad754b8309 --- /dev/null +++ b/iml-services/iml-snapshot/src/policy.rs @@ -0,0 +1,709 @@ +// Copyright (c) 2020 DDN. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +use crate::Error; +use chrono::{DateTime, Datelike as _, Duration, NaiveDate, NaiveDateTime, Utc}; +use futures::future::{try_join_all, AbortHandle, Abortable}; +use iml_command_utils::wait_for_cmds_success; +use iml_graphql_queries::snapshot; +use iml_manager_client::{graphql, Client}; +use iml_postgres::{sqlx, PgPool}; +use iml_tracing::tracing; +use iml_wire_types::{snapshot::SnapshotPolicy, Command}; +use std::collections::{HashMap, HashSet, LinkedList}; + +pub async fn main(client: Client, pg: PgPool) -> Result<(), Error> { + let mut svcs: HashMap = HashMap::new(); + loop { + let resp: iml_graphql_queries::Response = + graphql(client.clone(), snapshot::policy::list::build()).await?; + + let new: HashSet = Result::from(resp)? 
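// The reconciliation below compares the freshly fetched policy set with the
// policies that already have a running task. Policies are compared as whole
// values, so editing any field of a policy makes its old version appear only
// in `old` and the edited version only in `new`: the stale task is aborted
// and a fresh task is spawned with the updated policy. The loop then sleeps
// for a few seconds before polling the GraphQL API again.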
+ .data + .snapshot + .policies + .into_iter() + .collect(); + + let old: HashSet = svcs.keys().cloned().collect(); + + for p in old.difference(&new) { + tracing::debug!("stopping obsolete {:?}", p); + let ah = svcs.remove(p).unwrap(); + ah.abort(); + } + for p in new.difference(&old).cloned() { + tracing::debug!("starting new {:?}", p); + let ah = start(client.clone(), pg.clone(), p.clone()); + svcs.insert(p, ah); + } + + tokio::time::delay_for(tokio::time::Duration::from_secs(6)).await; + } +} + +fn start(client: Client, pg: PgPool, policy: SnapshotPolicy) -> AbortHandle { + let (ah, ah_reg) = AbortHandle::new_pair(); + + tokio::spawn(Abortable::new( + async move { + loop { + if let Err(e) = run(&client, &pg, &policy).await { + tracing::warn!("automatic snapshot failed: {}", e); + tokio::time::delay_for(tokio::time::Duration::from_secs(10)).await; + } + } + }, + ah_reg, + )); + + ah +} + +async fn run(client: &Client, pg: &PgPool, policy: &SnapshotPolicy) -> Result<(), Error> { + let interval = Duration::from_std(policy.interval.0).unwrap(); + + let mut next_time = Utc::now(); + + if let Some(start) = policy.start { + let next_time = next_tick(start, policy.interval.0, next_time); + tracing::debug!( + "next automatic snapshot of {} will be at {:?}", + policy.filesystem, + next_time + ); + } else { + let latest = sqlx::query!( + r#" + SELECT snapshot_name, create_time FROM snapshot + WHERE filesystem_name = $1 AND snapshot_name LIKE '\_\_%' + ORDER BY create_time DESC + LIMIT 1 + "#, + policy.filesystem + ) + .fetch_optional(pg) + .await?; + + if let Some(x) = latest { + tracing::debug!( + "latest automatic snapshot of {}: {} at {:?}", + policy.filesystem, + x.snapshot_name, + x.create_time + ); + next_time = x.create_time + interval; + } + } + + let pause = + Duration::to_std(&(next_time - Utc::now())).unwrap_or(std::time::Duration::from_secs(0)); + + if pause.as_secs() > 0 { + tracing::debug!( + "sleeping {:?} before creating next snapshot of {}", + pause, + policy.filesystem + ); + tokio::time::delay_for(pause).await; + } + + let mut snapshots = sqlx::query!( + r#" + SELECT snapshot_name, create_time FROM snapshot + WHERE filesystem_name = $1 + "#, + policy.filesystem + ) + .fetch_all(pg) + .await? 
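// Automatic snapshots are named "__<unix seconds>" (see `create_snapshot`
// below); the `LIKE '\_\_%'` filter above and the `starts_with("__")` checks
// in `get_obsolete` rely on that prefix to tell automatic snapshots apart
// from manually created ones, which the policy never deletes. A new snapshot
// is only taken while the total count is below `keep`; otherwise only the
// pruning step runs.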
+ .into_iter() + .map(|x| (x.create_time, x.snapshot_name)) + .collect::>(); + + if snapshots.len() < policy.keep as usize { + let new = create_snapshot(client, pg, policy).await?; + snapshots.push(new); + } else { + tracing::warn!( + "cannot create new automatic snapshot: too many snapshots exist ({}, max: {})", + snapshots.len(), + policy.keep + ); + } + + let obsolete = get_obsolete(policy, snapshots); + + if obsolete.is_empty() { + tracing::info!("no obsolete snapshots of {}", policy.filesystem); + } else { + destroy_snapshots(client, pg, policy, obsolete).await?; + } + + // XXX Wait in hope that the new changes are reflected in the database: + let pause = Duration::to_std(&(next_time + interval - Utc::now())) + .unwrap_or(std::time::Duration::from_secs(0)) + .div_f32(2.0); + + if pause.as_secs() > 0 { + tracing::debug!( + "cooldown sleeping {:?} before creating next snapshot of {}", + pause, + policy.filesystem + ); + tokio::time::delay_for(pause).await; + } + + Ok(()) +} + +async fn create_snapshot( + client: &Client, + pg: &PgPool, + policy: &SnapshotPolicy, +) -> Result<(DateTime, String), Error> { + let now = Utc::now(); + let name = now.format("__%s").to_string(); + tracing::info!( + "creating automatic snapshot {} of {}", + name, + policy.filesystem + ); + + let query = snapshot::create::build( + policy.filesystem.clone(), + name.clone(), + "automatic snapshot".into(), + Some(policy.barrier), + ); + let resp: iml_graphql_queries::Response = + graphql(client.clone(), query).await?; + let x = Result::from(resp)?.data.snapshot.create; + wait_for_cmds_success(&[x], None).await?; + + sqlx::query!( + "UPDATE snapshot_policy SET last_run = $1 WHERE filesystem = $2", + Utc::now(), + policy.filesystem + ) + .execute(pg) + .await?; + + Ok((now, name)) +} + +async fn destroy_snapshots( + client: &Client, + pg: &PgPool, + policy: &SnapshotPolicy, + snapshots: Vec, +) -> Result<(), Error> { + let cmd_futs: Vec<_> = snapshots + .iter() + .map(|x| destroy_snapshot(client.clone(), policy.filesystem.clone(), x.clone())) + .collect(); + let cmds = try_join_all(cmd_futs).await?; + wait_for_cmds_success(&cmds, None).await?; + + tracing::info!("destroyed obsolete snapshots: {}", snapshots.join(",")); + + sqlx::query!( + "UPDATE snapshot_policy SET last_run = $1 WHERE filesystem = $2", + Utc::now(), + policy.filesystem + ) + .execute(pg) + .await?; + + Ok(()) +} + +async fn destroy_snapshot( + client: Client, + filesystem: String, + snapshot: String, +) -> Result { + let query = snapshot::destroy::build(filesystem, snapshot, true); + let resp: iml_graphql_queries::Response = + graphql(client, query).await?; + let cmd = Result::from(resp)?.data.snapshot.destroy; + Ok(cmd) +} + +fn get_obsolete(policy: &SnapshotPolicy, snapshots: Vec<(DateTime, String)>) -> Vec { + let immune_count = snapshots.iter().filter(|x| !x.1.starts_with("__")).count(); + let quote_count = + policy.keep - (policy.daily + policy.weekly + policy.monthly + immune_count as i32); + + let mut tail: Vec<_> = snapshots + .iter() + .skip(std::cmp::max(0, quote_count) as usize) + .map(|x| x.0) + .collect(); + + tracing::debug!("all snapshots of {}: {:?}", policy.filesystem, tail); + + let mut to_delete: Vec> = Vec::with_capacity(tail.len()); + + fn collect(d: &mut Vec>, res: LinkedList>) { + for x in res { + for y in x.into_iter().skip(1) { + d.push(DateTime::from_utc(y, Utc)); + } + } + } + + // Handle daily snapshots: + if let Some(x) = tail.get(0) { + let next_day = x.date().succ().and_hms(0, 0, 0); + let cut = next_day - 
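// The daily/weekly/monthly pruning below operates on the snapshots left over
// once the always-kept quota (`keep` minus the daily, weekly and monthly
// slots and any manual snapshots) has been skipped. Each schedule cuts the
// remaining range into calendar-aligned buckets counted back from the bucket
// boundary after the newest remaining snapshot; within every bucket only the
// newest automatic snapshot survives (`collect` skips the first entry of each
// bucket) and the rest are queued for deletion. For example, with `daily = 2`
// and snapshots at 09:00 and 17:00 on both Monday and Tuesday, the 17:00
// snapshot of each day is kept, the 09:00 ones are dropped, and anything
// older than the two-day window falls through to the weekly and then the
// monthly schedule.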
Duration::days(policy.daily as i64); + + let (daily, new_tail): (Vec<_>, Vec<_>) = tail.into_iter().partition(|x| *x > cut); + tracing::debug!("daily snapshots to consider: {:?}, {:?}", cut, daily); + + let datetimes: Vec = daily.iter().map(|x| x.naive_utc()).collect(); + let res = partition_datetime( + &|_| Duration::days(1).num_seconds(), + next_day.naive_utc(), + &datetimes, + ); + tracing::debug!("daily partition: {:?}", res); + collect(&mut to_delete, res); + tail = new_tail; + } + tracing::debug!( + "snapshots of {} to consider for deletion after the daily schedule: {:?}", + policy.filesystem, + tail + ); + + // Handle weekly snapshots: + if let Some(x) = tail.get(0) { + let date = x.date(); + let days_to_next_week = Duration::days((7 - date.weekday().num_days_from_monday()).into()); + let next_week = (date + days_to_next_week).and_hms(0, 0, 0); + let cut = next_week - Duration::weeks(policy.weekly as i64); + + let (weekly, new_tail): (Vec<_>, Vec<_>) = tail.into_iter().partition(|x| *x > cut); + tracing::debug!("weekly snapshots to consider: {:?}, {:?}", cut, weekly); + + let datetimes: Vec = weekly.iter().map(|x| x.naive_utc()).collect(); + let res = partition_datetime( + &|_| Duration::weeks(1).num_seconds(), + next_week.naive_utc(), + &datetimes, + ); + tracing::debug!("weekly partition: {:?}", res); + collect(&mut to_delete, res); + tail = new_tail; + } + tracing::debug!( + "snapshots of {} to consider for deletion after the weekly schedule: {:?}", + policy.filesystem, + tail + ); + + // Handle monthly snapshots: + if let Some(x) = tail.get(0) { + let next_month = add_month(&x.naive_utc(), 1); + let cut = DateTime::::from_utc(add_month(&next_month, -policy.monthly), Utc); + let f = |n: u32| { + let n_month = add_month(&next_month, 0 - n as i32); + let n1_month = add_month(&n_month, 1); + (n1_month - n_month).num_seconds() + }; + + let (monthly, new_tail): (Vec<_>, Vec<_>) = tail.into_iter().partition(|x| *x > cut); + tracing::debug!("monthly snapshots to consider: {:?}, {:?}", cut, monthly); + + let datetimes: Vec = monthly.iter().map(|x| x.naive_utc()).collect(); + let res = partition_datetime(&f, next_month, &datetimes); + tracing::debug!("monthly partition: {:?}", res); + collect(&mut to_delete, res); + tail = new_tail; + } + tracing::debug!( + "snapshots of {} to consider for deletion after the monthly schedule: {:?}", + policy.filesystem, + tail + ); + + to_delete.append(&mut tail); + + to_delete.sort_unstable(); + let (mut delete, mut keep): (Vec<_>, Vec<_>) = snapshots + .into_iter() + .partition(|x| to_delete.binary_search(&x.0).is_ok() && x.1.starts_with("__")); + tracing::debug!("snapshots of {} to keep: {:?}", policy.filesystem, keep); + + let excess = keep.len() as i32 - policy.keep; + if excess > 0 { + keep.sort_unstable_by_key(|x| x.0); + let mut more = keep + .into_iter() + .filter(|x| x.1.starts_with("__")) + .take(excess as usize) + .collect(); + tracing::debug!( + "extra snapshots of {} to delete to keep number under {}: {:?}", + policy.filesystem, + policy.keep, + more + ); + delete.append(&mut more); + } + + delete.into_iter().map(|x| x.1).collect() +} + +fn add_month(date: &NaiveDateTime, n: i32) -> NaiveDateTime { + let month = date.date().month() as i32; + + let x = month + n; + let new_year = date.date().year() + + if x > 12 { + x / 12 + } else if x < 0 { + x / 12 - 1 + } else { + 0 + }; + + let x = month + n % 12; + let new_month = if x > 12 { + x - 12 + } else if x <= 0 { + 12 + x + } else { + x + } as u32; + + let new_date = 
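// The result is truncated to 00:00 on the first day of the computed month.
// `get_obsolete` uses it both for the monthly cut-off and, through the
// closure `f`, to size each monthly bucket in seconds, since months vary in
// length and the bucket width cannot be a constant as it is for the daily
// and weekly schedules.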
NaiveDate::from_ymd(new_year, new_month, 1); + + new_date.and_hms(0, 0, 0) +} + +fn partition(f: &dyn Fn(u32) -> i64, v0: i64, v: I) -> LinkedList> +where + I: IntoIterator, +{ + let mut term: LinkedList = LinkedList::new(); + let mut res: LinkedList> = LinkedList::new(); + let mut n: u32 = 1; + let mut a: i64 = v0; + + for i in v { + while i >= a + f(n) { + res.push_back(term); + term = LinkedList::new(); + a += f(n); + n += 1; + } + term.push_back(i); + } + res.push_back(term); + + res +} + +fn partition_datetime( + f: &dyn Fn(u32) -> i64, + start: NaiveDateTime, + datetimes: &[NaiveDateTime], +) -> LinkedList> { + let mut v: Vec = datetimes + .into_iter() + .map(|&d| (start - d).num_seconds()) + .collect(); + v.sort_unstable(); + v.dedup(); + + let part = partition(f, 0, v); + + part.into_iter() + .map(|l| { + l.into_iter() + .map(|d| start - Duration::seconds(d)) + .collect() + }) + .collect() +} + +fn next_tick( + start: DateTime, + interval: std::time::Duration, + now: DateTime, +) -> DateTime { + if now > start { + let d = (now - start).num_seconds(); + let i = Duration::from_std(interval).unwrap().num_seconds(); + let one = if d % i == 0 { 0 } else { 1 }; + let n = (d / i) + one; + now + Duration::seconds(n * i - d) + } else { + start + } +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::Duration; + use iml_wire_types::graphql_duration::GraphQLDuration; + + fn to_datetime(s: &str) -> DateTime { + DateTime::parse_from_rfc3339(s).unwrap().with_timezone(&Utc) + } + + fn snapshots(x: Vec<(&str, &str)>) -> Vec<(DateTime, String)> { + x.into_iter() + .map(|(t, n)| { + ( + DateTime::parse_from_rfc3339(t).unwrap().with_timezone(&Utc), + n.into(), + ) + }) + .collect() + } + + #[test] + fn test_ticks() { + for (start, interval, now, res) in vec![ + ( + "2020-12-15T00:00:00Z", + Duration::hours(12), + "2020-12-15T04:00:00Z", + "2020-12-15T12:00:00Z", + ), + ( + "2020-12-25T01:23:45Z", + Duration::hours(2), + "2020-12-25T01:00:00Z", + "2020-12-25T01:23:45Z", + ), + ( + "2020-12-25T01:23:45Z", + Duration::hours(2), + "2020-12-25T01:30:00Z", + "2020-12-25T03:23:45Z", + ), + ( + "2020-12-15T20:00:00Z", + Duration::days(1), + "2020-12-15T20:00:00Z", + "2020-12-15T20:00:00Z", + ), + ( + "2002-12-25T01:00:00Z", + Duration::hours(1), + "2020-12-15T01:01:00Z", + "2020-12-15T02:00:00Z", + ), + ] + .into_iter() + .map(|(p, i, n, r)| { + ( + to_datetime(p), + i.to_std().unwrap(), + to_datetime(n), + to_datetime(r), + ) + }) { + assert_eq!(next_tick(start, interval, now), res); + } + } + + #[test] + fn test_add_month() { + for (d0, n, d) in vec![ + ("2020-11-24T12:34:56Z", 0, "2020-11-01T00:00:00Z"), + ("2020-10-24T14:35:02Z", 1, "2020-11-01T00:00:00Z"), + ("2020-11-24T14:35:02Z", 1, "2020-12-01T00:00:00Z"), + ("2020-01-11T14:34:10Z", 10, "2020-11-01T00:00:00Z"), + ("2020-12-01T06:34:12Z", 23, "2022-11-01T00:00:00Z"), + ("2020-03-11T06:34:12Z", 36, "2023-03-01T00:00:00Z"), + ("2020-10-24T14:35:02Z", -1, "2020-09-01T00:00:00Z"), + ("2020-12-01T00:00:00Z", -3, "2020-09-01T00:00:00Z"), + ("2020-10-24T14:35:02Z", -12, "2019-10-01T00:00:00Z"), + ("2020-10-14T14:35:02Z", -16, "2019-06-01T00:00:00Z"), + ("2020-09-04T14:35:02Z", -36, "2017-09-01T00:00:00Z"), + ] { + let start = to_datetime(d0).naive_utc(); + let end = to_datetime(d).naive_utc(); + + assert_eq!(add_month(&start, n), end); + } + } + + #[test] + fn test_get_obsolete_automatic() { + let policy = SnapshotPolicy { + id: 0, + filesystem: "fs".into(), + interval: GraphQLDuration(std::time::Duration::from_secs(60)), + start: None, + barrier: 
true, + keep: 11, + daily: 4, + weekly: 3, + monthly: 3, + last_run: None, + }; + + let snapshots = snapshots(vec![ + ("2020-11-24T14:34:11Z", "__01"), + ("2020-11-24T14:33:00Z", "__02"), + ("2020-11-24T04:30:00Z", "__03"), + ("2020-11-23T04:36:12Z", "__04"), + ("2020-11-23T04:34:00Z", "__05"), + ("2020-11-23T01:30:00Z", "__06"), + ("2020-11-22T21:38:13Z", "__07"), + ("2020-11-22T16:32:00Z", "__08"), + ("2020-11-22T03:33:00Z", "__09"), + ("2020-11-21T23:22:14Z", "__10"), + ("2020-11-21T11:59:00Z", "__11"), + ("2020-11-17T00:59:21Z", "__12"), + ("2020-11-14T23:22:22Z", "__13"), + ("2020-11-14T11:59:00Z", "__14"), + ("2020-11-13T09:44:00Z", "__15"), + ("2020-11-13T08:37:00Z", "__16"), + ("2020-11-12T05:11:00Z", "__18"), + ("2020-11-06T23:11:23Z", "__19"), + ("2020-11-05T13:55:00Z", "__20"), + ("2020-11-01T13:11:31Z", "__21"), + ("2020-10-31T10:55:32Z", "__22"), + ("2020-10-31T00:55:00Z", "__23"), + ("2020-10-23T00:55:00Z", "__24"), + ("2020-10-01T00:01:00Z", "__25"), + ("2020-09-21T00:00:33Z", "__26"), + ]); + + let mut obsolete = get_obsolete(&policy, snapshots); + obsolete.sort(); + + let expected_obsolete: Vec = vec![ + "__03", "__05", "__06", "__08", "__09", "__11", "__14", "__15", "__16", "__18", "__20", + "__23", "__24", "__25", + ] + .into_iter() + .map(String::from) + .collect(); + + assert_eq!(obsolete, expected_obsolete); + } + + #[test] + fn test_get_obsolete_mixed() { + let policy = SnapshotPolicy { + id: 0, + filesystem: "fs".into(), + interval: GraphQLDuration(std::time::Duration::from_secs(60)), + start: None, + barrier: true, + keep: 10, + daily: 4, + weekly: 3, + monthly: 3, + last_run: None, + }; + + let snapshots = snapshots(vec![ + ("2020-11-24T14:34:11Z", "__01"), + ("2020-11-24T14:33:00Z", "02"), + ("2020-11-24T04:30:00Z", "__03"), + ("2020-11-23T04:36:12Z", "__04"), + ("2020-11-23T04:34:00Z", "__05"), + ("2020-11-23T01:30:00Z", "__06"), + ("2020-11-22T21:38:13Z", "__07"), + ("2020-11-22T16:32:00Z", "__08"), + ("2020-11-22T03:33:00Z", "09"), + ("2020-11-21T23:22:14Z", "__10"), + ("2020-11-21T11:59:00Z", "__11"), + ("2020-11-17T00:59:21Z", "__12"), + ("2020-11-14T23:22:22Z", "__13"), + ("2020-11-14T11:59:00Z", "__14"), + ("2020-11-13T09:44:00Z", "__15"), + ("2020-11-13T08:37:00Z", "__16"), + ("2020-11-12T05:11:00Z", "__18"), + ("2020-11-06T23:11:23Z", "__19"), + ("2020-11-05T13:55:00Z", "__20"), + ("2020-11-01T13:11:31Z", "__21"), + ("2020-10-31T10:55:32Z", "__22"), + ("2020-10-31T00:55:00Z", "__23"), + ("2020-10-23T00:55:00Z", "__24"), + ("2020-10-01T00:01:00Z", "__25"), + ("2020-09-21T00:00:33Z", "26"), + ]); + + let mut obsolete = get_obsolete(&policy, snapshots); + obsolete.sort(); + + let expected_obsolete: Vec = vec![ + "__03", "__05", "__06", "__08", "__11", "__14", "__15", "__16", "__18", "__20", "__21", + "__22", "__23", "__24", "__25", + ] + .into_iter() + .map(String::from) + .collect(); + + assert_eq!(obsolete, expected_obsolete); + } + + #[test] + fn test_get_obsolete_max() { + let policy = SnapshotPolicy { + id: 0, + filesystem: "fs".into(), + interval: GraphQLDuration(std::time::Duration::from_secs(60)), + start: None, + barrier: true, + keep: 4, + daily: 4, + weekly: 3, + monthly: 3, + last_run: None, + }; + + let snapshots = snapshots(vec![ + ("2020-11-24T14:34:11Z", "__01"), + ("2020-11-24T14:33:00Z", "02"), + ("2020-11-24T04:30:00Z", "__03"), + ("2020-11-23T04:36:12Z", "__04"), + ("2020-11-23T04:34:00Z", "__05"), + ("2020-11-23T01:30:00Z", "__06"), + ("2020-11-22T21:38:13Z", "__07"), + ("2020-11-22T16:32:00Z", "__08"), + ("2020-11-22T03:33:00Z", "09"), 
+ ("2020-11-21T23:22:14Z", "__10"), + ("2020-11-21T11:59:00Z", "__11"), + ("2020-11-17T00:59:21Z", "__12"), + ("2020-11-14T23:22:22Z", "__13"), + ("2020-11-14T11:59:00Z", "__14"), + ("2020-11-13T09:44:00Z", "__15"), + ("2020-11-13T08:37:00Z", "__16"), + ("2020-11-12T05:11:00Z", "__18"), + ("2020-11-06T23:11:23Z", "__19"), + ("2020-11-05T13:55:00Z", "__20"), + ("2020-11-01T13:11:31Z", "__21"), + ("2020-10-31T10:55:32Z", "__22"), + ("2020-10-31T00:55:00Z", "__23"), + ("2020-10-23T00:55:00Z", "__24"), + ("2020-10-01T00:01:00Z", "__25"), + ("2020-09-21T00:00:33Z", "26"), + ]); + + let mut obsolete = get_obsolete(&policy, snapshots); + obsolete.sort(); + + let expected_obsolete: Vec = vec![ + "__03", "__04", "__05", "__06", "__07", "__08", "__10", "__11", "__12", "__13", "__14", + "__15", "__16", "__18", "__19", "__20", "__21", "__22", "__23", "__24", "__25", + ] + .into_iter() + .map(String::from) + .collect(); + + assert_eq!(obsolete, expected_obsolete); + } +} diff --git a/iml-services/iml-snapshot/src/retention.rs b/iml-services/iml-snapshot/src/retention.rs deleted file mode 100644 index eacfc154bf..0000000000 --- a/iml-services/iml-snapshot/src/retention.rs +++ /dev/null @@ -1,213 +0,0 @@ -use crate::{Error, FsStats}; -use iml_command_utils::wait_for_cmds_success; -use iml_influx::{Client as InfluxClient, InfluxClientExt as _}; -use iml_manager_client::{graphql, Client}; -use iml_postgres::{sqlx, PgPool}; -use iml_tracing::tracing; -use iml_wire_types::{snapshot, Command}; -use std::collections::HashMap; - -async fn get_stats_from_influx( - fs_name: &str, - client: &InfluxClient, -) -> Result, Error> { - let nodes: Option> = client - .query_into( - format!( - r#" - SELECT - SUM(bytes_total) as bytes_total, - SUM(bytes_free) as bytes_free, - SUM("bytes_avail") as bytes_avail - FROM ( - SELECT LAST("bytes_total") AS bytes_total, - LAST("bytes_free") as bytes_free, - LAST("bytes_avail") as bytes_avail - FROM "target" - WHERE "kind" = 'OST' AND "fs" = '{}' - GROUP BY target - ) - "#, - fs_name, - ) - .as_str(), - None, - ) - .await?; - - if let Some(nodes) = nodes { - if nodes.is_empty() { - return Ok(None); - } else { - let bytes_avail = nodes[0].bytes_avail; - let bytes_total = nodes[0].bytes_total; - let bytes_free = nodes[0].bytes_free; - let bytes_used = bytes_total - bytes_free; - - return Ok(Some((bytes_avail, bytes_free, bytes_used))); - } - } - - Ok(None) -} - -async fn get_snapshots( - pool: &sqlx::PgPool, - fs_name: &str, -) -> Result, Error> { - let xs = sqlx::query_as!( - snapshot::SnapshotRecord, - "SELECT * FROM snapshot WHERE filesystem_name = $1 ORDER BY create_time ASC", - fs_name - ) - .fetch_all(pool) - .await?; - - Ok(xs) -} - -async fn get_retentions(pool: &sqlx::PgPool) -> Result, Error> { - let xs = sqlx::query_as!( - snapshot::SnapshotRetention, - r#" - SELECT - id, - filesystem_name, - reserve_value, - reserve_unit as "reserve_unit:snapshot::ReserveUnit", - last_run, - keep_num - FROM snapshot_retention - "# - ) - .fetch_all(pool) - .await?; - - Ok(xs) -} - -async fn get_retention_policy( - pool: &PgPool, - fs_name: &str, -) -> Result, Error> { - let xs = get_retentions(pool).await?; - - Ok(xs.into_iter().find(|x| x.filesystem_name == fs_name)) -} - -async fn destroy_snapshot( - client: Client, - fs_name: &str, - snapshot_name: &str, -) -> Result { - let resp: iml_graphql_queries::Response = - graphql( - client, - iml_graphql_queries::snapshot::destroy::build(fs_name, snapshot_name, true), - ) - .await?; - - let cmd = Result::from(resp)?.data.destroy_snapshot; - - 
Ok(cmd) -} - -async fn get_retention_filesystems(pool: &PgPool) -> Result, Error> { - let xs = get_retentions(pool).await?; - - let fs_names = xs.into_iter().map(|x| x.filesystem_name).collect(); - Ok(fs_names) -} - -pub async fn process_retention( - client: &Client, - influx_client: &InfluxClient, - pool: &PgPool, - mut stats_record: HashMap, -) -> Result, Error> { - let filesystems = get_retention_filesystems(pool).await?; - - tracing::debug!("Filesystems with retentions: {:?}", filesystems); - - for fs_name in filesystems { - let stats = get_stats_from_influx(&fs_name, &influx_client).await?; - - if let Some((bytes_avail, bytes_free, bytes_used)) = stats { - tracing::debug!( - "stats values: {}, {}, {}", - bytes_avail, - bytes_free, - bytes_used - ); - let percent_used = - (bytes_used as f64 / (bytes_used as f64 + bytes_avail as f64)) as f64 * 100.0f64; - let percent_free = 100.0f64 - percent_used; - - tracing::debug!( - "stats record: {:?} - bytes free: {}", - stats_record.get(&fs_name), - bytes_free - ); - let retention = get_retention_policy(pool, &fs_name).await?; - - if let Some(retention) = retention { - let snapshots = get_snapshots(pool, &fs_name).await?; - - tracing::debug!( - "percent_left: {}, reserve value: {}", - percent_free, - retention.reserve_value - ); - let should_delete_snapshot = match retention.reserve_unit { - snapshot::ReserveUnit::Percent => percent_free < retention.reserve_value as f64, - snapshot::ReserveUnit::Gibibytes => { - let gib_free: f64 = bytes_free as f64 / 1_073_741_824_f64; - gib_free < retention.reserve_value as f64 - } - snapshot::ReserveUnit::Tebibytes => { - let teb_free: f64 = bytes_free as f64 / 1_099_511_627_776_f64; - teb_free < retention.reserve_value as f64 - } - }; - tracing::debug!("Should delete snapshot?: {}", should_delete_snapshot); - - if should_delete_snapshot - && snapshots.len() > retention.keep_num as usize - && stats_record.get(&fs_name) != Some(&bytes_used) - { - stats_record.insert(fs_name.to_string(), bytes_used); - tracing::debug!("About to delete earliest snapshot."); - let snapshot_name = snapshots[0].snapshot_name.to_string(); - tracing::debug!("Deleting {}", snapshot_name); - let cmd = - destroy_snapshot(client.clone(), &fs_name, snapshot_name.as_ref()).await?; - - wait_for_cmds_success(&[cmd], None).await?; - } - } - } - } - - Ok(stats_record) -} - -pub async fn handle_retention_rules( - client: Client, - influx_client: InfluxClient, - pool: PgPool, -) -> Result<(), Error> { - let mut prev_stats: HashMap = vec![].into_iter().collect::>(); - - loop { - prev_stats = - match process_retention(&client, &influx_client, &pool, prev_stats.clone()).await { - Ok(x) => x, - Err(e) => { - tracing::error!("Retention Rule processing error: {:?}", e); - prev_stats - } - }; - - tokio::time::delay_for(tokio::time::Duration::from_secs(60)).await; - } -} diff --git a/iml-warp-drive/src/cache.rs b/iml-warp-drive/src/cache.rs index 434a053f3f..10fdfcb517 100644 --- a/iml-warp-drive/src/cache.rs +++ b/iml-warp-drive/src/cache.rs @@ -18,7 +18,7 @@ use iml_wire_types::{ EnclosureType, HealthState, JobState, JobType, MemberState, SfaController, SfaDiskDrive, SfaEnclosure, SfaJob, SfaPowerSupply, SfaStorageSystem, SubTargetType, }, - snapshot::{ReserveUnit, SnapshotInterval, SnapshotRecord, SnapshotRetention}, + snapshot::{SnapshotPolicy, SnapshotRecord}, warp_drive::{Cache, Record, RecordChange, RecordId}, Alert, ApiList, EndpointName, Filesystem, FlatQuery, FsType, Host, }; @@ -190,16 +190,10 @@ pub async fn db_record_to_change_record( 
Ok(RecordChange::Update(Record::Snapshot(x))) } }, - DbRecord::SnapshotInterval(x) => match (msg_type, x) { - (MessageType::Delete, x) => Ok(RecordChange::Delete(RecordId::SnapshotInterval(x.id))), + DbRecord::SnapshotPolicy(x) => match (msg_type, x) { + (MessageType::Delete, x) => Ok(RecordChange::Delete(RecordId::SnapshotPolicy(x.id))), (MessageType::Insert, x) | (MessageType::Update, x) => { - Ok(RecordChange::Update(Record::SnapshotInterval(x))) - } - }, - DbRecord::SnapshotRetention(x) => match (msg_type, x) { - (MessageType::Delete, x) => Ok(RecordChange::Delete(RecordId::SnapshotRetention(x.id))), - (MessageType::Insert, x) | (MessageType::Update, x) => { - Ok(RecordChange::Update(Record::SnapshotRetention(x))) + Ok(RecordChange::Update(Record::SnapshotPolicy(x))) } }, DbRecord::LnetConfiguration(x) => match (msg_type, x) { @@ -578,16 +572,21 @@ pub async fn populate_from_db( .try_collect() .await?; - cache.snapshot_interval = sqlx::query!("SELECT * FROM snapshot_interval") + cache.snapshot_policy = sqlx::query!(r#"SELECT * FROM snapshot_policy"#) .fetch(pool) .map_ok(|x| { ( x.id, - SnapshotInterval { + SnapshotPolicy { id: x.id, - filesystem_name: x.filesystem_name, - use_barrier: x.use_barrier, + filesystem: x.filesystem, interval: x.interval.into(), + start: x.start, + barrier: x.barrier, + keep: x.keep, + daily: x.daily, + weekly: x.weekly, + monthly: x.monthly, last_run: x.last_run, }, ) @@ -595,24 +594,6 @@ pub async fn populate_from_db( .try_collect() .await?; - cache.snapshot_retention = sqlx::query_as!( - SnapshotRetention, - r#" - SELECT - id, - filesystem_name, - reserve_value, - reserve_unit as "reserve_unit:ReserveUnit", - last_run, - keep_num - FROM snapshot_retention - "# - ) - .fetch(pool) - .map_ok(|x| (x.id, x)) - .try_collect() - .await?; - cache.stratagem_config = sqlx::query_as!( StratagemConfiguration, "select * from chroma_core_stratagemconfiguration where not_deleted = 't'" diff --git a/iml-warp-drive/src/db_record.rs b/iml-warp-drive/src/db_record.rs index d4a7c4e7ee..d53453c6ea 100644 --- a/iml-warp-drive/src/db_record.rs +++ b/iml-warp-drive/src/db_record.rs @@ -22,10 +22,7 @@ use iml_wire_types::{ SFA_CONTROLLER_TABLE_NAME, SFA_DISK_DRIVE_TABLE_NAME, SFA_ENCLOSURE_TABLE_NAME, SFA_JOB_TABLE_NAME, SFA_POWER_SUPPLY_TABLE_NAME, SFA_STORAGE_SYSTEM_TABLE_NAME, }, - snapshot::{ - SnapshotInterval, SnapshotRecord, SnapshotRetention, SNAPSHOT_INTERVAL_TABLE_NAME, - SNAPSHOT_RETENTION_TABLE_NAME, SNAPSHOT_TABLE_NAME, - }, + snapshot::{SnapshotPolicy, SnapshotRecord, SNAPSHOT_POLICY_TABLE_NAME, SNAPSHOT_TABLE_NAME}, }; use serde::de::Error; use std::convert::TryFrom; @@ -56,8 +53,7 @@ pub enum DbRecord { SfaPowerSupply(SfaPowerSupply), SfaController(SfaController), Snapshot(SnapshotRecord), - SnapshotInterval(SnapshotInterval), - SnapshotRetention(SnapshotRetention), + SnapshotPolicy(SnapshotPolicy), StratagemConfiguration(StratagemConfiguration), TargetRecord(TargetRecord), Volume(VolumeRecord), @@ -95,13 +91,8 @@ impl TryFrom<(TableName<'_>, serde_json::Value)> for DbRecord { serde_json::from_value(x).map(DbRecord::StratagemConfiguration) } SNAPSHOT_TABLE_NAME => serde_json::from_value(x).map(DbRecord::Snapshot), - SNAPSHOT_INTERVAL_TABLE_NAME => { - serde_json::from_value(x).map(DbRecord::SnapshotInterval) - } - SNAPSHOT_RETENTION_TABLE_NAME => { - serde_json::from_value(x).map(DbRecord::SnapshotRetention) - } TARGET_TABLE_NAME => serde_json::from_value(x).map(DbRecord::TargetRecord), + SNAPSHOT_POLICY_TABLE_NAME => 
serde_json::from_value(x).map(DbRecord::SnapshotPolicy), LNET_CONFIGURATION_TABLE_NAME => { serde_json::from_value(x).map(DbRecord::LnetConfiguration) } diff --git a/iml-wire-types/src/graphql_duration.rs b/iml-wire-types/src/graphql_duration.rs index a31bb8016e..822731ffe7 100644 --- a/iml-wire-types/src/graphql_duration.rs +++ b/iml-wire-types/src/graphql_duration.rs @@ -8,7 +8,7 @@ use sqlx::postgres::types::PgInterval; use std::convert::TryInto; use std::{convert::TryFrom, fmt, time::Duration}; -#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, Debug)] +#[derive(serde::Deserialize, serde::Serialize, Clone, Eq, Hash, PartialEq, Debug)] #[serde(try_from = "String", into = "String")] pub struct GraphQLDuration(pub Duration); diff --git a/iml-wire-types/src/snapshot.rs b/iml-wire-types/src/snapshot.rs index 3bf2344f3c..419d73a2d4 100644 --- a/iml-wire-types/src/snapshot.rs +++ b/iml-wire-types/src/snapshot.rs @@ -9,7 +9,6 @@ use crate::{ graphql_duration::GraphQLDuration, }; use chrono::{offset::Utc, DateTime}; -use std::str::FromStr; #[cfg(feature = "cli")] use structopt::StructOpt; @@ -59,77 +58,64 @@ impl Id for SnapshotRecord { pub const SNAPSHOT_TABLE_NAME: TableName = TableName("snapshot"); #[cfg_attr(feature = "graphql", derive(juniper::GraphQLObject))] -#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, Debug)] -/// A Snapshot interval -pub struct SnapshotInterval { +#[derive(serde::Deserialize, serde::Serialize, Eq, Clone, Debug)] +/// Automatic snapshot policy +pub struct SnapshotPolicy { /// The configuration id pub id: i32, /// The filesystem name - pub filesystem_name: String, - /// Use a write barrier - pub use_barrier: bool, + pub filesystem: String, /// The interval configuration pub interval: GraphQLDuration, - // Last known run + /// Preferred time to start creating automatic snapshots + pub start: Option<DateTime<Utc>>, + /// Use a write barrier + pub barrier: bool, + /// Number of recent snapshots to keep + pub keep: i32, + /// The number of days for which to keep the most recent snapshot of each day + pub daily: i32, + /// The number of weeks for which to keep the most recent snapshot of each week + pub weekly: i32, + /// The number of months for which to keep the most recent snapshot of each month + pub monthly: i32, + /// Last known run pub last_run: Option<DateTime<Utc>>, } -impl Id for SnapshotInterval { - fn id(&self) -> i32 { - self.id +impl PartialEq for SnapshotPolicy { + fn eq(&self, other: &Self) -> bool { + self.filesystem == other.filesystem + && self.interval == other.interval + && self.start == other.start + && self.barrier == other.barrier + && self.keep == other.keep + && self.daily == other.daily + && self.weekly == other.weekly + && self.monthly == other.monthly } } -pub const SNAPSHOT_INTERVAL_TABLE_NAME: TableName = TableName("snapshot_interval"); - -#[cfg_attr(feature = "graphql", derive(juniper::GraphQLObject))] -#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, Debug)] -pub struct SnapshotRetention { - pub id: i32, - pub filesystem_name: String, - /// Amount or percent of free space to reserve - pub reserve_value: i32, - pub reserve_unit: ReserveUnit, - /// Minimum number of snapshots to keep - pub keep_num: i32, - pub last_run: Option<DateTime<Utc>>, +impl std::hash::Hash for SnapshotPolicy { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + self.filesystem.hash(state); + self.interval.hash(state); + self.start.hash(state); + self.barrier.hash(state); + self.keep.hash(state); + self.daily.hash(state); + self.weekly.hash(state); + self.monthly.hash(state); + } } -impl Id for 
SnapshotRetention { +impl Id for SnapshotPolicy { fn id(&self) -> i32 { self.id } } -pub const SNAPSHOT_RETENTION_TABLE_NAME: TableName = TableName("snapshot_retention"); - -#[cfg_attr(feature = "graphql", derive(juniper::GraphQLEnum))] -#[cfg_attr(feature = "postgres-interop", derive(sqlx::Type))] -#[cfg_attr(feature = "postgres-interop", sqlx(rename = "snapshot_reserve_unit"))] -#[cfg_attr(feature = "postgres-interop", sqlx(rename_all = "lowercase"))] -#[derive(serde::Deserialize, serde::Serialize, Clone, Copy, PartialEq, Debug)] -#[serde(rename_all = "lowercase")] -pub enum ReserveUnit { - #[cfg_attr(feature = "graphql", graphql(name = "percent"))] - Percent, - #[cfg_attr(feature = "graphql", graphql(name = "gibibytes"))] - Gibibytes, - #[cfg_attr(feature = "graphql", graphql(name = "tebibytes"))] - Tebibytes, -} - -impl FromStr for ReserveUnit { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "%" | "percent" => Ok(Self::Percent), - "gib" | "g" | "gibibytes" => Ok(Self::Gibibytes), - "tib" | "t" | "tebibytes" => Ok(Self::Tebibytes), - x => Err(format!("Unexpected '{}'", x)), - } - } -} +pub const SNAPSHOT_POLICY_TABLE_NAME: TableName = TableName("snapshot_policy"); #[derive(serde::Deserialize, Debug)] #[cfg_attr(feature = "cli", derive(StructOpt))] diff --git a/iml-wire-types/src/warp_drive.rs b/iml-wire-types/src/warp_drive.rs index 547e911ceb..b26a798759 100644 --- a/iml-wire-types/src/warp_drive.rs +++ b/iml-wire-types/src/warp_drive.rs @@ -11,7 +11,7 @@ use crate::{ StratagemConfiguration, TargetRecord, VolumeNodeRecord, VolumeRecord, }, sfa::{SfaController, SfaDiskDrive, SfaEnclosure, SfaJob, SfaPowerSupply, SfaStorageSystem}, - snapshot::{SnapshotInterval, SnapshotRecord, SnapshotRetention}, + snapshot::{SnapshotPolicy, SnapshotRecord}, Alert, CompositeId, EndpointNameSelf, Filesystem, Host, Label, LockChange, ToCompositeId, }; use im::{HashMap, HashSet}; @@ -110,8 +110,7 @@ pub struct Cache { pub sfa_storage_system: HashMap, pub sfa_controller: HashMap, pub snapshot: HashMap, - pub snapshot_interval: HashMap, - pub snapshot_retention: HashMap, + pub snapshot_policy: HashMap, pub stratagem_config: HashMap, pub target: HashMap, pub target_record: HashMap, @@ -143,8 +142,7 @@ pub struct ArcCache { pub sfa_power_supply: HashMap>, pub sfa_controller: HashMap>, pub snapshot: HashMap>, - pub snapshot_interval: HashMap>, - pub snapshot_retention: HashMap>, + pub snapshot_policy: HashMap>, pub stratagem_config: HashMap>, pub target: HashMap>, pub target_record: HashMap>, @@ -204,14 +202,9 @@ impl Cache { .remove(&id) .map(Record::StratagemConfig), RecordId::Snapshot(id) => self.snapshot.remove(&id).map(Record::Snapshot), - RecordId::SnapshotInterval(id) => self - .snapshot_interval - .remove(&id) - .map(Record::SnapshotInterval), - RecordId::SnapshotRetention(id) => self - .snapshot_retention - .remove(&id) - .map(Record::SnapshotRetention), + RecordId::SnapshotPolicy(id) => { + self.snapshot_policy.remove(&id).map(Record::SnapshotPolicy) + } RecordId::Target(id) => self.target.remove(&id).map(Record::Target), RecordId::TargetRecord(id) => self.target_record.remove(&id).map(Record::TargetRecord), RecordId::User(id) => self.user.remove(&id).map(Record::User), @@ -280,11 +273,8 @@ impl Cache { Record::Snapshot(x) => { self.snapshot.insert(x.id, x); } - Record::SnapshotInterval(x) => { - self.snapshot_interval.insert(x.id(), x); - } - Record::SnapshotRetention(x) => { - self.snapshot_retention.insert(x.id(), x); + Record::SnapshotPolicy(x) => { 
+ self.snapshot_policy.insert(x.id(), x); } Record::StratagemConfig(x) => { self.stratagem_config.insert(x.id(), x); @@ -348,8 +338,7 @@ impl ArcCache { RecordId::SfaPowerSupply(id) => self.sfa_power_supply.remove(&id).is_some(), RecordId::SfaController(id) => self.sfa_controller.remove(&id).is_some(), RecordId::Snapshot(id) => self.snapshot.remove(&id).is_some(), - RecordId::SnapshotInterval(id) => self.snapshot_interval.remove(&id).is_some(), - RecordId::SnapshotRetention(id) => self.snapshot_retention.remove(&id).is_some(), + RecordId::SnapshotPolicy(id) => self.snapshot_policy.remove(&id).is_some(), RecordId::StratagemConfig(id) => self.stratagem_config.remove(&id).is_some(), RecordId::Target(id) => self.target.remove(&id).is_some(), RecordId::TargetRecord(id) => self.target_record.remove(&id).is_some(), @@ -419,11 +408,8 @@ impl ArcCache { Record::Snapshot(x) => { self.snapshot.insert(x.id, Arc::new(x)); } - Record::SnapshotInterval(x) => { - self.snapshot_interval.insert(x.id, Arc::new(x)); - } - Record::SnapshotRetention(x) => { - self.snapshot_retention.insert(x.id(), Arc::new(x)); + Record::SnapshotPolicy(x) => { + self.snapshot_policy.insert(x.id(), Arc::new(x)); } Record::StratagemConfig(x) => { self.stratagem_config.insert(x.id(), Arc::new(x)); @@ -501,8 +487,7 @@ impl From<&Cache> for ArcCache { sfa_power_supply: hashmap_to_arc_hashmap(&cache.sfa_power_supply), sfa_controller: hashmap_to_arc_hashmap(&cache.sfa_controller), snapshot: hashmap_to_arc_hashmap(&cache.snapshot), - snapshot_interval: hashmap_to_arc_hashmap(&cache.snapshot_interval), - snapshot_retention: hashmap_to_arc_hashmap(&cache.snapshot_retention), + snapshot_policy: hashmap_to_arc_hashmap(&cache.snapshot_policy), stratagem_config: hashmap_to_arc_hashmap(&cache.stratagem_config), target: hashmap_to_arc_hashmap(&cache.target), target_record: hashmap_to_arc_hashmap(&cache.target_record), @@ -536,8 +521,7 @@ impl From<&ArcCache> for Cache { sfa_power_supply: arc_hashmap_to_hashmap(&cache.sfa_power_supply), sfa_controller: arc_hashmap_to_hashmap(&cache.sfa_controller), snapshot: arc_hashmap_to_hashmap(&cache.snapshot), - snapshot_interval: arc_hashmap_to_hashmap(&cache.snapshot_interval), - snapshot_retention: arc_hashmap_to_hashmap(&cache.snapshot_retention), + snapshot_policy: arc_hashmap_to_hashmap(&cache.snapshot_policy), stratagem_config: arc_hashmap_to_hashmap(&cache.stratagem_config), target: arc_hashmap_to_hashmap(&cache.target), target_record: arc_hashmap_to_hashmap(&cache.target_record), @@ -572,8 +556,7 @@ pub enum Record { SfaPowerSupply(SfaPowerSupply), SfaController(SfaController), Snapshot(SnapshotRecord), - SnapshotInterval(SnapshotInterval), - SnapshotRetention(SnapshotRetention), + SnapshotPolicy(SnapshotPolicy), StratagemConfig(StratagemConfiguration), Target(ManagedTargetRecord), TargetRecord(TargetRecord), @@ -604,8 +587,7 @@ pub enum ArcRecord { SfaPowerSupply(Arc), SfaController(Arc), Snapshot(Arc), - SnapshotInterval(Arc), - SnapshotRetention(Arc), + SnapshotPolicy(Arc), StratagemConfig(Arc), Target(Arc), TargetRecord(Arc), @@ -638,8 +620,7 @@ impl From for ArcRecord { Record::SfaController(x) => Self::SfaController(Arc::new(x)), Record::StratagemConfig(x) => Self::StratagemConfig(Arc::new(x)), Record::Snapshot(x) => Self::Snapshot(Arc::new(x)), - Record::SnapshotInterval(x) => Self::SnapshotInterval(Arc::new(x)), - Record::SnapshotRetention(x) => Self::SnapshotRetention(Arc::new(x)), + Record::SnapshotPolicy(x) => Self::SnapshotPolicy(Arc::new(x)), Record::Target(x) => 
Self::Target(Arc::new(x)), Record::TargetRecord(x) => Self::TargetRecord(Arc::new(x)), Record::User(x) => Self::User(Arc::new(x)), @@ -673,8 +654,7 @@ pub enum RecordId { SfaController(i32), StratagemConfig(i32), Snapshot(i32), - SnapshotInterval(i32), - SnapshotRetention(i32), + SnapshotPolicy(i32), Target(i32), TargetRecord(i32), User(i32), @@ -706,8 +686,7 @@ impl From<&Record> for RecordId { Record::SfaController(x) => RecordId::SfaController(x.id), Record::StratagemConfig(x) => RecordId::StratagemConfig(x.id), Record::Snapshot(x) => RecordId::Snapshot(x.id), - Record::SnapshotInterval(x) => RecordId::SnapshotInterval(x.id), - Record::SnapshotRetention(x) => RecordId::SnapshotRetention(x.id), + Record::SnapshotPolicy(x) => RecordId::SnapshotPolicy(x.id), Record::Target(x) => RecordId::Target(x.id), Record::TargetRecord(x) => RecordId::TargetRecord(x.id), Record::User(x) => RecordId::User(x.id), @@ -742,8 +721,7 @@ impl Deref for RecordId { | Self::SfaController(x) | Self::Snapshot(x) | Self::StratagemConfig(x) - | Self::SnapshotInterval(x) - | Self::SnapshotRetention(x) + | Self::SnapshotPolicy(x) | Self::Target(x) | Self::TargetRecord(x) | Self::User(x) diff --git a/migrations/20201214000000_snapshot_v2.sql b/migrations/20201214000000_snapshot_v2.sql new file mode 100644 index 0000000000..fc4bb00561 --- /dev/null +++ b/migrations/20201214000000_snapshot_v2.sql @@ -0,0 +1,71 @@ +CREATE TABLE IF NOT EXISTS snapshot_policy ( + id serial PRIMARY KEY, + filesystem TEXT NOT NULL UNIQUE, + interval INTERVAL NOT NULL, + start TIMESTAMP WITH TIME ZONE, + barrier BOOLEAN DEFAULT false NOT NULL, + keep INT NOT NULL CHECK (keep > 1 AND keep < 33), + daily INT DEFAULT 0 NOT NULL CHECK (daily >= 0), + weekly INT DEFAULT 0 NOT NULL CHECK (weekly >= 0), + monthly INT DEFAULT 0 NOT NULL CHECK (monthly >= 0), + last_run TIMESTAMP WITH TIME ZONE +); + + +CREATE OR REPLACE FUNCTION snapshot_policy_func() RETURNS TRIGGER AS $$ +DECLARE + r snapshot_policy; +BEGIN + IF (TG_OP = 'INSERT') OR (TG_OP = 'UPDATE' AND OLD IS DISTINCT FROM NEW) + THEN + r := NEW; + ELSEIF TG_OP = 'DELETE' + THEN + r := OLD; + ELSE + r := NULL; + END IF; + + IF r.id IS NOT NULL + THEN + PERFORM pg_notify( + 'table_update', + notify_row(TG_OP, TG_TABLE_NAME, + json_build_object( + 'id', r.id, + 'filesystem', r.filesystem, + 'interval', interval_to_seconds(r.interval), + 'start', r.start, + 'barrier', r.barrier, + 'keep', r.keep, + 'daily', r.daily, + 'weekly', r.weekly, + 'monthly', r.monthly, + 'last_run', r.last_run + ) + ) + ); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + + +DROP TRIGGER IF EXISTS snapshot_policy_notify_update ON snapshot_policy; +CREATE TRIGGER snapshot_policy_notify_update AFTER UPDATE ON snapshot_policy +FOR EACH ROW EXECUTE PROCEDURE snapshot_policy_func(); + +DROP TRIGGER IF EXISTS snapshot_policy_notify_insert ON snapshot_policy; +CREATE TRIGGER snapshot_policy_notify_insert AFTER INSERT ON snapshot_policy +FOR EACH ROW EXECUTE PROCEDURE snapshot_policy_func(); + +DROP TRIGGER IF EXISTS snapshot_policy_notify_delete ON snapshot_policy; +CREATE TRIGGER snapshot_policy_notify_delete AFTER DELETE ON snapshot_policy +FOR EACH ROW EXECUTE PROCEDURE snapshot_policy_func(); + + +DROP TABLE IF EXISTS snapshot_interval CASCADE; +DROP TABLE IF EXISTS snapshot_retention CASCADE; +DROP FUNCTION IF EXISTS table_update_notify_snapshot_interval() CASCADE; +DROP TYPE IF EXISTS snapshot_reserve_unit CASCADE; diff --git a/sqlx-data.json b/sqlx-data.json index 2ee47168f8..05763c4e32 100644 --- a/sqlx-data.json +++ 
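For reference, the triggers in this migration publish every insert, update and delete on snapshot_policy through pg_notify on the 'table_update' channel (the payload is whatever the pre-existing notify_row() helper wraps around the json_build_object(...) fields shown above), and the iml-warp-drive changes earlier in the patch are the consumer, turning those payloads into DbRecord::SnapshotPolicy. A minimal standalone sketch of watching that channel, assuming sqlx's PgListener and a placeholder connection string:

use sqlx::postgres::PgListener;

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Placeholder DSN; the real services take theirs from configuration.
    let mut listener = PgListener::connect("postgres://localhost/chroma").await?;
    listener.listen("table_update").await?;

    loop {
        let n = listener.recv().await?;
        // For snapshot_policy rows, snapshot_policy_func() above puts id,
        // filesystem, interval (as seconds), start, barrier, keep, daily,
        // weekly, monthly and last_run into the notification payload.
        println!("{}: {}", n.channel(), n.payload());
    }
}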
b/sqlx-data.json @@ -513,65 +513,6 @@ "nullable": [] } }, - "17645262c426038efcc8e22bf999c0d3cee07f52c4e276a1b607e2e00b2e62bd": { - "query": "\n SELECT\n id,\n filesystem_name,\n reserve_value,\n reserve_unit as \"reserve_unit:ReserveUnit\",\n last_run,\n keep_num\n FROM snapshot_retention\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "filesystem_name", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "reserve_value", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "reserve_unit:ReserveUnit", - "type_info": { - "Custom": { - "name": "snapshot_reserve_unit", - "kind": { - "Enum": [ - "percent", - "gibibytes", - "tebibytes" - ] - } - } - } - }, - { - "ordinal": 4, - "name": "last_run", - "type_info": "Timestamptz" - }, - { - "ordinal": 5, - "name": "keep_num", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false, - false, - true, - false - ] - } - }, "17ed37ab2c915514b18cde4f0a1f3d2bf2eface63c4cb96e18155ab370fe39b6": { "query": "DELETE FROM chroma_core_serverprofile_repolist WHERE serverprofile_id = $1", "describe": { @@ -1310,28 +1251,6 @@ ] } }, - "3a099c680488372f27c087f941161200360329a21145cf0fa22c81f3ec5ee2ef": { - "query": "\n INSERT INTO snapshot_interval (\n filesystem_name,\n use_barrier,\n interval\n )\n VALUES ($1, $2, $3)\n ON CONFLICT (filesystem_name, interval)\n DO NOTHING\n RETURNING id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Text", - "Bool", - "Interval" - ] - }, - "nullable": [ - false - ] - } - }, "3b1ddcee3dee96365325874c4ea32c832ffd74ab137399443052a0918d69f2ed": { "query": "\n SELECT\n c.id AS id,\n cancelled,\n complete,\n errored,\n created_at,\n array_agg(cj.job_id)::INT[] AS job_ids,\n message\n FROM chroma_core_command c\n JOIN chroma_core_command_jobs cj ON c.id = cj.command_id\n WHERE (c.id = ANY ($3::INT[]))\n GROUP BY c.id\n OFFSET $1 LIMIT $2\n ", "describe": { @@ -1603,107 +1522,6 @@ ] } }, - "45e66b9722e3f0ea68049efc3ce9a52ac0ccc202cd82c415578337c4053c873b": { - "query": "\n SELECT\n id,\n filesystem_name,\n reserve_value,\n reserve_unit as \"reserve_unit:snapshot::ReserveUnit\",\n last_run,\n keep_num\n FROM snapshot_retention\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "filesystem_name", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "reserve_value", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "reserve_unit:snapshot::ReserveUnit", - "type_info": { - "Custom": { - "name": "snapshot_reserve_unit", - "kind": { - "Enum": [ - "percent", - "gibibytes", - "tebibytes" - ] - } - } - } - }, - { - "ordinal": 4, - "name": "last_run", - "type_info": "Timestamptz" - }, - { - "ordinal": 5, - "name": "keep_num", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false, - false, - true, - false - ] - } - }, - "46a7815b904eddf8c5b3f77e9c9623806ba3e0a0247080ac9900adaad6b3ce11": { - "query": "SELECT * FROM snapshot_interval", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "filesystem_name", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "use_barrier", - "type_info": "Bool" - }, - { - "ordinal": 3, - "name": "last_run", - "type_info": "Timestamptz" - }, - { - "ordinal": 4, - "name": 
"interval", - "type_info": "Interval" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false, - true, - false - ] - } - }, "484084fcd20a682adf9354fc35b4180e0ec9190f3941e993ba44ec30e60d0d20": { "query": "\n SELECT cluster_id, name, active\n FROM corosync_resource\n WHERE resource_agent = 'ocf::ddn:Ticketer';\n ", "describe": { @@ -2839,6 +2657,78 @@ ] } }, + "82aa67ceef3e830c8a4f4739d8e02c2fce46906173bbc19dedcc324d0183628d": { + "query": "SELECT * FROM snapshot_policy", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "filesystem", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "interval", + "type_info": "Interval" + }, + { + "ordinal": 3, + "name": "start", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "barrier", + "type_info": "Bool" + }, + { + "ordinal": 5, + "name": "keep", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "daily", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "weekly", + "type_info": "Int4" + }, + { + "ordinal": 8, + "name": "monthly", + "type_info": "Int4" + }, + { + "ordinal": 9, + "name": "last_run", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + false, + true + ] + } + }, "857ec5f2517d25f901399ddfb8efa1ab3f73ed8c8f899692c071172b61179d1a": { "query": "select * from chroma_core_lnetconfiguration where not_deleted = 't'", "describe": { @@ -3057,20 +2947,6 @@ ] } }, - "8ac87d0e2c016985d993de3824a253423b3aa038a1609a0eecc2562d1b4887aa": { - "query": "\n UPDATE snapshot_interval\n SET last_run=$1\n WHERE id=$2 AND filesystem_name=$3\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Timestamptz", - "Int4", - "Text" - ] - }, - "nullable": [] - } - }, "8b11d7745ec336638b333bc511166314f63619293bf8b3df15fd1df807e1ee9a": { "query": "SELECT id, message FROM chroma_core_alertstate WHERE lustre_pid = $1 ORDER BY id DESC LIMIT 1", "describe": { @@ -3262,18 +3138,6 @@ "nullable": [] } }, - "93e2695978ceecbebff40c31f2f58bf6fd5351869d3b279adc5ad1f7436a25e9": { - "query": "DELETE FROM snapshot_interval WHERE id=$1", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4" - ] - }, - "nullable": [] - } - }, "9a4c05da9d9233e6b3fa63ca2f50cf90feb0c305b1cc05e0eb2edcf2572db4ba": { "query": "select * from chroma_core_volume where not_deleted = 't'", "describe": { @@ -3328,68 +3192,6 @@ ] } }, - "9bc21bd7e2f9506e3ae0cb80fd80e0fc95243470a9bedd1d728a860d7d6b408f": { - "query": "SELECT * FROM snapshot WHERE filesystem_name = $1 ORDER BY create_time ASC", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "filesystem_name", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "snapshot_name", - "type_info": "Text" - }, - { - "ordinal": 3, - "name": "create_time", - "type_info": "Timestamptz" - }, - { - "ordinal": 4, - "name": "modify_time", - "type_info": "Timestamptz" - }, - { - "ordinal": 5, - "name": "snapshot_fsname", - "type_info": "Text" - }, - { - "ordinal": 6, - "name": "mounted", - "type_info": "Bool" - }, - { - "ordinal": 7, - "name": "comment", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Text" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - true - ] - } - }, "9d75d9c59b4e8e5e54653866451a026b2308d9d97f1a5ea65513de4a1ce3862c": { "query": "\n INSERT INTO 
chroma_core_managedtarget (\n state_modified_at,\n state,\n immutable_state,\n name,\n uuid,\n ha_label,\n reformat,\n not_deleted,\n content_type_id\n ) VALUES (now(), 'mounted', 'f', $1, $2, $3, 'f', 't', $4)\n RETURNING id\n ", "describe": { @@ -3505,6 +3307,19 @@ ] } }, + "a0e41b98f3319a78ac985a3e7c0d5204cdceb8fd0ceb04b69d58e50abecf16b7": { + "query": "UPDATE snapshot_policy SET last_run = $1 WHERE filesystem = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Timestamptz", + "Text" + ] + }, + "nullable": [] + } + }, "a3269a5f7c491a332facfcf86c576350f4b9e7c14638a26d0bd17daef52c0613": { "query": "SELECT\n id,\n index,\n enclosure_index,\n failed,\n slot_number,\n health_state as \"health_state: HealthState\",\n health_state_reason,\n member_index,\n member_state as \"member_state: MemberState\",\n storage_system\n FROM chroma_core_sfadiskdrive\n ", "describe": { @@ -3803,6 +3618,32 @@ "nullable": [] } }, + "af4ce773226e54a4320e042550d316c8635fe6162a17fb532ab82c7c62a163f2": { + "query": "\n SELECT snapshot_name, create_time FROM snapshot\n WHERE filesystem_name = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snapshot_name", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "create_time", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false + ] + } + }, "b04b6d9456e4fbd6b7c97cb633393b8971701303845f99a716f581b6ee0eb481": { "query": "SELECT repo_name from chroma_core_repo where repo_name = ANY($1)", "describe": { @@ -4231,6 +4072,25 @@ "nullable": [] } }, + "c2ec32d65c69f7b530d75755fba6c2095327b72162950c6a123aa1e3cde82977": { + "query": "\n INSERT INTO snapshot_policy (\n filesystem,\n interval,\n start,\n barrier,\n keep,\n daily,\n weekly,\n monthly\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n ON CONFLICT (filesystem)\n DO UPDATE SET\n interval = EXCLUDED.interval,\n start = EXCLUDED.start,\n barrier = EXCLUDED.barrier,\n keep = EXCLUDED.keep,\n daily = EXCLUDED.daily,\n weekly = EXCLUDED.weekly,\n monthly = EXCLUDED.monthly\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Interval", + "Timestamptz", + "Bool", + "Int4", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + } + }, "c4699fe75876e33df71163c95690a4680f8bee665994cd0b6ebe4a6087aa6d0a": { "query": "\n SELECT\n index,\n enclosure_index,\n health_state as \"health_state: _\",\n health_state_reason,\n position,\n storage_system\n FROM chroma_core_sfapowersupply\n ", "describe": { @@ -4375,18 +4235,6 @@ "nullable": [] } }, - "c92c006232fff5c4ba37894b349d63142824fc0141a156f06114fa113ed36fa7": { - "query": "DELETE FROM snapshot_retention WHERE id=$1", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4" - ] - }, - "nullable": [] - } - }, "c9f733dc3958a75b638d7c6f567e6fccf89575ac2ac3a43a14bf64978557d91c": { "query": "SELECT id, name, cluster_id, resource_agent, role, active, orphaned, managed,\n failed, failure_ignored, nodes_running_on, (active_node).id AS active_node_id,\n (active_node).name AS active_node_name, mount_point\n FROM corosync_resource", "describe": { @@ -4496,6 +4344,32 @@ "nullable": [] } }, + "ce0ed9bea162c8123ed47fc2cfb66f6cced822ce07dc4394d470f70c53b2f86c": { + "query": "\n SELECT snapshot_name, create_time FROM snapshot\n WHERE filesystem_name = $1 AND snapshot_name LIKE '\\_\\_%'\n ORDER BY create_time DESC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snapshot_name", + "type_info": "Text" + }, + { + "ordinal": 
1, + "name": "create_time", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false + ] + } + }, "ceff1ca4a42d0a41561eb3c3f51f5e720138c890d76437845b74889fc82410cb": { "query": "\n SELECT n.nid FROM target AS t\n INNER JOIN lnet as l ON l.host_id = ANY(t.host_ids)\n INNER JOIN nid as n ON n.id = ANY(l.nids)\n WHERE t.name='MGS' AND $1 = ANY(t.filesystems)\n AND n.host_id NOT IN (\n SELECT nh.host_id\n FROM corosync_resource_bans b\n INNER JOIN corosync_node_managed_host nh ON (nh.corosync_node_id).name = b.node\n AND nh.cluster_id = b.cluster_id\n INNER JOIN corosync_resource r ON r.name = b.resource AND b.cluster_id = r.cluster_id\n WHERE r.mount_point is not NULL AND r.mount_point = t.mount_path\n )\n GROUP BY l.host_id, n.nid ORDER BY l.host_id, n.nid\n ", "describe": { @@ -4768,27 +4642,13 @@ ] } }, - "dc969e1cf438fc20baccd36a43cf7725bcf95cf758c6f08d5ce88bd4f378a71c": { - "query": "\n INSERT INTO snapshot_retention (\n filesystem_name,\n reserve_value,\n reserve_unit,\n keep_num\n )\n VALUES ($1, $2, $3, $4)\n ON CONFLICT (filesystem_name)\n DO UPDATE SET\n reserve_value = EXCLUDED.reserve_value,\n reserve_unit = EXCLUDED.reserve_unit,\n keep_num = EXCLUDED.keep_num\n ", + "db6424e3d3d5c2c2aaa67ea58d21af22a5ac91586c057d6983e24aca31792ff9": { + "query": "DELETE FROM snapshot_policy WHERE filesystem = $1", "describe": { "columns": [], "parameters": { "Left": [ - "Text", - "Int4", - { - "Custom": { - "name": "snapshot_reserve_unit", - "kind": { - "Enum": [ - "percent", - "gibibytes", - "tebibytes" - ] - } - } - }, - "Int4" + "Text" ] }, "nullable": [] @@ -4888,65 +4748,6 @@ ] } }, - "e211fee58a58a67ae21206e84802a76ce81d15f4a3573bcceb3ba55f32a282b1": { - "query": "\n SELECT\n id,\n filesystem_name,\n reserve_value,\n reserve_unit as \"reserve_unit:ReserveUnit\",\n last_run,\n keep_num\n FROM snapshot_retention\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "filesystem_name", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "reserve_value", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "reserve_unit:ReserveUnit", - "type_info": { - "Custom": { - "name": "snapshot_reserve_unit", - "kind": { - "Enum": [ - "percent", - "gibibytes", - "tebibytes" - ] - } - } - } - }, - { - "ordinal": 4, - "name": "last_run", - "type_info": "Timestamptz" - }, - { - "ordinal": 5, - "name": "keep_num", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false, - false, - true, - false - ] - } - }, "e556047b44f30c75388944aa4d96d4ade4f5eed4e0a401bbd766943cf9495ca0": { "query": "\n SELECT \n mt.state,\n t.name,\n t.filesystems\n FROM chroma_core_managedtarget mt\n INNER JOIN target t\n ON t.uuid = mt.uuid\n WHERE mt.not_deleted = 't'\n AND $1::text[] @> t.filesystems;\n ", "describe": {
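The snapshot_policy upsert recorded above (the INSERT ... ON CONFLICT (filesystem) DO UPDATE entry) is what creates or replaces a policy per filesystem. A hedged sketch of issuing the same kind of statement with sqlx; the helper name and the trimmed column list are illustrative only, not the patch's actual API:

use sqlx::{postgres::types::PgInterval, PgPool};

// Illustrative helper: insert or update a policy keyed on the filesystem name.
async fn upsert_policy(
    pool: &PgPool,
    fs: &str,
    interval_secs: i64,
    keep: i32,
) -> Result<(), sqlx::Error> {
    let interval = PgInterval {
        months: 0,
        days: 0,
        microseconds: interval_secs * 1_000_000,
    };

    sqlx::query(
        r#"
        INSERT INTO snapshot_policy (filesystem, interval, barrier, keep, daily, weekly, monthly)
        VALUES ($1, $2, false, $3, 0, 0, 0)
        ON CONFLICT (filesystem) DO UPDATE SET
            interval = EXCLUDED.interval,
            keep = EXCLUDED.keep
        "#,
    )
    .bind(fs)
    .bind(interval)
    .bind(keep)
    .execute(pool)
    .await?;

    Ok(())
}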