diff --git a/s3/Cargo.toml b/s3/Cargo.toml index 9bb327bf37..ea742e228f 100644 --- a/s3/Cargo.toml +++ b/s3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rust-s3" -version = "0.35.0-alpha.2" +version = "0.35.0-alpha.3" authors = ["Drazen Urch"] description = "Rust library for working with AWS S3 and compatible object storage APIs" repository = "https://github.com/durch/rust-s3" diff --git a/s3/src/bucket.rs b/s3/src/bucket.rs index 3fe22d3a01..9d741b0b9f 100644 --- a/s3/src/bucket.rs +++ b/s3/src/bucket.rs @@ -52,8 +52,9 @@ use crate::error::S3Error; use crate::post_policy::PresignedPost; use crate::request::Request; use crate::serde_types::{ - BucketLocationResult, CompleteMultipartUploadData, CorsConfiguration, HeadObjectResult, - InitiateMultipartUploadResponse, ListBucketResult, ListMultipartUploadsResult, Part, + BucketLifecycleConfiguration, BucketLocationResult, CompleteMultipartUploadData, + CorsConfiguration, HeadObjectResult, InitiateMultipartUploadResponse, ListBucketResult, + ListMultipartUploadsResult, Part, }; #[allow(unused_imports)] use crate::utils::{error_from_response_data, PutStreamResponse}; @@ -780,6 +781,33 @@ impl Bucket { request.response_data(false).await } + #[maybe_async::maybe_async] + pub async fn get_bucket_lifecycle(&self) -> Result<BucketLifecycleConfiguration, S3Error> { + let request = RequestImpl::new(self, "", Command::GetBucketLifecycle).await?; + let response = request.response_data(false).await?; + Ok(quick_xml::de::from_str::<BucketLifecycleConfiguration>( + response.as_str()?, + )?)
+ } + + #[maybe_async::maybe_async] + pub async fn put_bucket_lifecycle( + &self, + lifecycle_config: BucketLifecycleConfiguration, + ) -> Result<ResponseData, S3Error> { + let command = Command::PutBucketLifecycle { + configuration: lifecycle_config, + }; + let request = RequestImpl::new(self, "", command).await?; + request.response_data(false).await + } + + #[maybe_async::maybe_async] + pub async fn delete_bucket_lifecycle(&self) -> Result<ResponseData, S3Error> { + let request = RequestImpl::new(self, "", Command::DeleteBucketLifecycle).await?; + request.response_data(false).await + } + + /// Gets torrent from an S3 path. + /// + /// # Example: diff --git a/s3/src/command.rs b/s3/src/command.rs index 8e5cbc7ee5..01104ec469 100644 --- a/s3/src/command.rs +++ b/s3/src/command.rs @@ -1,6 +1,9 @@ use std::collections::HashMap; -use crate::serde_types::{CompleteMultipartUploadData, CorsConfiguration}; +use crate::error::S3Error; +use crate::serde_types::{ + BucketLifecycleConfiguration, CompleteMultipartUploadData, CorsConfiguration, +}; use crate::EMPTY_PAYLOAD_SHA; use sha2::{Digest, Sha256}; @@ -129,6 +132,11 @@ pub enum Command<'a> { PutBucketCors { configuration: CorsConfiguration, }, + GetBucketLifecycle, + PutBucketLifecycle { + configuration: BucketLifecycleConfiguration, + }, + DeleteBucketLifecycle, } impl<'a> Command<'a> { @@ -142,6 +150,7 @@ impl<'a> Command<'a> { | Command::ListObjectsV2 { .. } | Command::GetBucketLocation | Command::GetObjectTagging + | Command::GetBucketLifecycle | Command::ListMultipartUploads { .. } | Command::PresignGet { .. } => HttpMethod::Get, Command::PutObject { .. } | Command::PutObjectTagging { .. } | Command::PresignPut { .. } | Command::UploadPart { .. } | Command::PutBucketCors { .. } - | Command::CreateBucket { .. } => HttpMethod::Put, + | Command::CreateBucket { .. } + | Command::PutBucketLifecycle { .. } => HttpMethod::Put, Command::DeleteObject | Command::DeleteObjectTagging | Command::AbortMultipartUpload { .. } | Command::PresignDelete { ..
} - | Command::DeleteBucket => HttpMethod::Delete, + | Command::DeleteBucket + | Command::DeleteBucketLifecycle => HttpMethod::Delete, Command::InitiateMultipartUpload { .. } | Command::CompleteMultipartUpload { .. } => { HttpMethod::Post } @@ -163,8 +174,8 @@ impl<'a> Command<'a> { } } - pub fn content_length(&self) -> usize { - match &self { + pub fn content_length(&self) -> Result { + let result = match &self { Command::CopyObject { from: _ } => 0, Command::PutObject { content, .. } => content.len(), Command::PutObjectTagging { tags } => tags.len(), @@ -177,21 +188,27 @@ impl<'a> Command<'a> { 0 } } + Command::PutBucketLifecycle { configuration } => { + quick_xml::se::to_string(configuration)?.as_bytes().len() + } _ => 0, - } + }; + Ok(result) } pub fn content_type(&self) -> String { match self { Command::InitiateMultipartUpload { content_type } => content_type.to_string(), Command::PutObject { content_type, .. } => content_type.to_string(), - Command::CompleteMultipartUpload { .. } => "application/xml".into(), + Command::CompleteMultipartUpload { .. } | Command::PutBucketLifecycle { .. } => { + "application/xml".into() + } _ => "text/plain".into(), } } - pub fn sha256(&self) -> String { - match &self { + pub fn sha256(&self) -> Result { + let result = match &self { Command::PutObject { content, .. 
} => { let mut sha = Sha256::default(); sha.update(content); @@ -217,6 +234,7 @@ impl<'a> Command<'a> { } } _ => EMPTY_PAYLOAD_SHA.into(), - } + }; + Ok(result) } } diff --git a/s3/src/request/async_std_backend.rs b/s3/src/request/async_std_backend.rs index 0288192a01..7609c71c46 100644 --- a/s3/src/request/async_std_backend.rs +++ b/s3/src/request/async_std_backend.rs @@ -61,7 +61,7 @@ impl<'a> Request for SurfRequest<'a> { HttpMethod::Head => surf::Request::builder(Method::Head, self.url()?), }; - let mut request = request.body(self.request_body()); + let mut request = request.body(self.request_body()?); for (name, value) in headers.iter() { request = request.header( diff --git a/s3/src/request/request_trait.rs b/s3/src/request/request_trait.rs index 7d15cd951d..dbde953028 100644 --- a/s3/src/request/request_trait.rs +++ b/s3/src/request/request_trait.rs @@ -1,6 +1,7 @@ use base64::engine::general_purpose; use base64::Engine; use hmac::Mac; +use quick_xml::se::to_string; use std::collections::HashMap; #[cfg(any(feature = "with-tokio", feature = "with-async-std"))] use std::pin::Pin; @@ -153,8 +154,8 @@ pub trait Request { ) } - fn request_body(&self) -> Vec { - if let Command::PutObject { content, .. } = self.command() { + fn request_body(&self) -> Result, S3Error> { + let result = if let Command::PutObject { content, .. } = self.command() { Vec::from(content) } else if let Command::PutObjectTagging { tags } = self.command() { Vec::from(tags) @@ -169,9 +170,12 @@ pub trait Request { } else { Vec::new() } + } else if let Command::PutBucketLifecycle { configuration } = &self.command() { + quick_xml::se::to_string(configuration)?.as_bytes().to_vec() } else { Vec::new() - } + }; + Ok(result) } fn long_date(&self) -> Result { @@ -377,6 +381,11 @@ pub trait Request { url_str.push_str(&multipart.query_string()) } } + Command::GetBucketLifecycle + | Command::PutBucketLifecycle { .. 
} + | Command::DeleteBucketLifecycle => { + url_str.push_str("?lifecycle"); + } _ => {} } @@ -464,7 +473,7 @@ pub trait Request { &self.command().http_verb().to_string(), &self.url()?, headers, - &self.command().sha256(), + &self.command().sha256()?, ) } @@ -492,7 +501,7 @@ pub trait Request { #[maybe_async::maybe_async] async fn headers(&self) -> Result { // Generate this once, but it's used in more than one place. - let sha256 = self.command().sha256(); + let sha256 = self.command().sha256()?; // Start with extra_headers, that way our headers replace anything with // the same name. @@ -531,7 +540,7 @@ pub trait Request { _ => { headers.insert( CONTENT_LENGTH, - self.command().content_length().to_string().parse()?, + self.command().content_length()?.to_string().parse()?, ); headers.insert(CONTENT_TYPE, self.command().content_type().parse()?); } @@ -584,6 +593,11 @@ pub trait Request { headers.insert(RANGE, range.parse()?); } else if let Command::CreateBucket { ref config } = self.command() { config.add_headers(&mut headers)?; + } else if let Command::PutBucketLifecycle { ref configuration } = self.command() { + let digest = md5::compute(to_string(configuration)?.as_bytes()); + let hash = general_purpose::STANDARD.encode(digest.as_ref()); + headers.insert(HeaderName::from_static("content-md5"), hash.parse()?); + headers.remove("x-amz-content-sha256"); } // This must be last, as it signs the other headers, omitted if no secret key is provided diff --git a/s3/src/request/tokio_backend.rs b/s3/src/request/tokio_backend.rs index 7c5f20487f..4ba8971b7b 100644 --- a/s3/src/request/tokio_backend.rs +++ b/s3/src/request/tokio_backend.rs @@ -133,7 +133,7 @@ impl<'a> Request for HyperRequest<'a> { request = request.header(header, value); } - request.body(Body::from(self.request_body()))? + request.body(Body::from(self.request_body()?))? 
}; let response = client.request(request).await?; diff --git a/s3/src/serde_types.rs b/s3/src/serde_types.rs index 2f858d0e0e..e93a723d4d 100644 --- a/s3/src/serde_types.rs +++ b/s3/src/serde_types.rs @@ -371,8 +371,343 @@ impl CorsRule { } } +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename = "LifecycleConfiguration")] +pub struct BucketLifecycleConfiguration { + #[serde(rename = "Rule")] + pub rules: Vec, +} + +impl BucketLifecycleConfiguration { + pub fn new(rules: Vec) -> Self { + BucketLifecycleConfiguration { rules } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +pub struct LifecycleRule { + #[serde( + rename = "AbortIncompleteMultipartUpload", + skip_serializing_if = "Option::is_none" + )] + pub abort_incomplete_multipart_upload: Option, + + #[serde(rename = "Expiration", skip_serializing_if = "Option::is_none")] + pub expiration: Option, + + #[serde(rename = "Filter", skip_serializing_if = "Option::is_none")] + pub filter: Option, + + #[serde(rename = "ID", skip_serializing_if = "Option::is_none")] + pub id: Option, + + #[serde( + rename = "NoncurrentVersionExpiration", + skip_serializing_if = "Option::is_none" + )] + pub noncurrent_version_expiration: Option, + + #[serde( + rename = "NoncurrentVersionTransition", + skip_serializing_if = "Option::is_none" + )] + pub noncurrent_version_transition: Option>, + + #[serde(rename = "Status")] + /// Valid Values: Enabled | Disabled + pub status: String, + + #[serde(rename = "Transition", skip_serializing_if = "Option::is_none")] + pub transition: Option>, +} + +pub struct LifecycleRuleBuilder { + lifecycle_rule: LifecycleRule, +} + +impl LifecycleRule { + pub fn builder(status: &str) -> LifecycleRuleBuilder { + LifecycleRuleBuilder::new(status) + } +} + +impl LifecycleRuleBuilder { + pub fn new(status: &str) -> LifecycleRuleBuilder { + LifecycleRuleBuilder { + lifecycle_rule: LifecycleRule { + status: status.to_string(), + ..Default::default() + }, + } + } + + pub fn 
abort_incomplete_multipart_upload( + mut self, + abort_incomplete_multipart_upload: AbortIncompleteMultipartUpload, + ) -> LifecycleRuleBuilder { + self.lifecycle_rule.abort_incomplete_multipart_upload = + Some(abort_incomplete_multipart_upload); + self + } + + pub fn expiration(mut self, expiration: Expiration) -> LifecycleRuleBuilder { + self.lifecycle_rule.expiration = Some(expiration); + self + } + + pub fn filter(mut self, filter: LifecycleFilter) -> LifecycleRuleBuilder { + self.lifecycle_rule.filter = Some(filter); + self + } + + pub fn id(mut self, id: &str) -> LifecycleRuleBuilder { + self.lifecycle_rule.id = Some(id.to_string()); + self + } + + pub fn noncurrent_version_expiration( + mut self, + noncurrent_version_expiration: NoncurrentVersionExpiration, + ) -> LifecycleRuleBuilder { + self.lifecycle_rule.noncurrent_version_expiration = Some(noncurrent_version_expiration); + self + } + + pub fn noncurrent_version_transition( + mut self, + noncurrent_version_transition: Vec, + ) -> LifecycleRuleBuilder { + self.lifecycle_rule.noncurrent_version_transition = Some(noncurrent_version_transition); + self + } + + pub fn transition(mut self, transition: Vec) -> LifecycleRuleBuilder { + self.lifecycle_rule.transition = Some(transition); + self + } + + pub fn build(self) -> LifecycleRule { + self.lifecycle_rule + } +} +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AbortIncompleteMultipartUpload { + #[serde( + rename = "DaysAfterInitiation", + skip_serializing_if = "Option::is_none" + )] + pub days_after_initiation: Option, +} + +impl AbortIncompleteMultipartUpload { + pub fn new(days_after_initiation: Option) -> Self { + Self { + days_after_initiation, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Expiration { + /// Indicates at what date the object is to be moved or deleted. The date value must conform to the ISO 8601 format. The time is always midnight UTC. 
+ #[serde(rename = "Date", skip_serializing_if = "Option::is_none")] + pub date: Option, + + #[serde(rename = "Days", skip_serializing_if = "Option::is_none")] + pub days: Option, + + /// Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. + #[serde( + rename = "ExpiredObjectDeleteMarker", + skip_serializing_if = "Option::is_none" + )] + pub expired_object_delete_marker: Option, +} + +impl Expiration { + pub fn new( + date: Option, + days: Option, + expired_object_delete_marker: Option, + ) -> Self { + Self { + date, + days, + expired_object_delete_marker, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct LifecycleFilter { + #[serde(rename = "And", skip_serializing_if = "Option::is_none")] + pub and: Option, + + #[serde( + rename = "ObjectSizeGreaterThan", + skip_serializing_if = "Option::is_none" + )] + pub object_size_greater_than: Option, + + #[serde(rename = "ObjectSizeLessThan", skip_serializing_if = "Option::is_none")] + pub object_size_less_than: Option, + + #[serde(rename = "Prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, + + #[serde(rename = "Tag", skip_serializing_if = "Option::is_none")] + pub tag: Option, +} +impl LifecycleFilter { + pub fn new( + and: Option, + object_size_greater_than: Option, + object_size_less_than: Option, + prefix: Option, + tag: Option, + ) -> Self { + Self { + and, + object_size_greater_than, + object_size_less_than, + prefix, + tag, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct And { + #[serde( + rename = "ObjectSizeGreaterThan", + skip_serializing_if = "Option::is_none" + )] + pub object_size_greater_than: Option, + + #[serde(rename = "ObjectSizeLessThan", skip_serializing_if = "Option::is_none")] + pub object_size_less_than: Option, + + 
#[serde(rename = "Prefix", skip_serializing_if = "Option::is_none")] + pub prefix: Option, + + #[serde(rename = "Tag", skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +impl And { + pub fn new( + object_size_greater_than: Option, + object_size_less_than: Option, + prefix: Option, + tags: Option>, + ) -> Self { + Self { + object_size_greater_than, + object_size_less_than, + prefix, + tags, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Tag { + #[serde(rename = "Key")] + pub key: String, + + #[serde(rename = "Value")] + pub value: String, +} + +impl Tag { + pub fn new(key: &str, value: &str) -> Self { + Self { + key: key.to_string(), + value: value.to_string(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct NoncurrentVersionExpiration { + #[serde( + rename = "NewerNoncurrentVersions", + skip_serializing_if = "Option::is_none" + )] + pub newer_noncurrent_versions: Option, + + #[serde(rename = "NoncurrentDays", skip_serializing_if = "Option::is_none")] + pub noncurrent_days: Option, +} + +impl NoncurrentVersionExpiration { + pub fn new(newer_noncurrent_versions: Option, noncurrent_days: Option) -> Self { + NoncurrentVersionExpiration { + newer_noncurrent_versions, + noncurrent_days, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct NoncurrentVersionTransition { + #[serde( + rename = "NewerNoncurrentVersions", + skip_serializing_if = "Option::is_none" + )] + pub newer_noncurrent_versions: Option, + + #[serde(rename = "NoncurrentDays", skip_serializing_if = "Option::is_none")] + pub noncurrent_days: Option, + + #[serde(rename = "StorageClass", skip_serializing_if = "Option::is_none")] + /// Valid Values: GLACIER | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | DEEP_ARCHIVE | GLACIER_IR + pub storage_class: Option, +} + +impl NoncurrentVersionTransition { + pub fn new( + newer_noncurrent_versions: Option, + noncurrent_days: Option, + storage_class: Option, + ) -> Self { + 
NoncurrentVersionTransition { + newer_noncurrent_versions, + noncurrent_days, + storage_class, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Transition { + #[serde(rename = "Date", skip_serializing_if = "Option::is_none")] + pub date: Option, + + #[serde(rename = "Days", skip_serializing_if = "Option::is_none")] + pub days: Option, + /// Valid Values: GLACIER | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | DEEP_ARCHIVE | GLACIER_IR + #[serde(rename = "StorageClass")] + pub storage_class: Option, +} + +impl Transition { + pub fn new(date: Option, days: Option, storage_class: Option) -> Self { + Transition { + date, + days, + storage_class, + } + } +} + #[cfg(test)] mod test { + use crate::serde_types::{ + AbortIncompleteMultipartUpload, BucketLifecycleConfiguration, Expiration, LifecycleFilter, + LifecycleRule, NoncurrentVersionExpiration, NoncurrentVersionTransition, Transition, + }; + use super::{CorsConfiguration, CorsRule}; #[test] @@ -396,4 +731,49 @@ mod test { r#"AuthorizationHeader2GETDELETE*lalaAuthorizationHeader2GETDELETE*lala"# ) } + + #[test] + fn lifecycle_config_serde() { + let rule = LifecycleRule { + abort_incomplete_multipart_upload: Some(AbortIncompleteMultipartUpload { + days_after_initiation: Some(30), + }), + expiration: Some(Expiration { + date: Some("2024-06-017".to_string()), + days: Some(30), + expired_object_delete_marker: Some(true), + }), + filter: Some(LifecycleFilter { + and: None, + object_size_greater_than: Some(10), + object_size_less_than: Some(50), + prefix: None, + tag: None, + }), + id: Some("lala".to_string()), + noncurrent_version_expiration: Some(NoncurrentVersionExpiration { + newer_noncurrent_versions: Some(30), + noncurrent_days: Some(30), + }), + noncurrent_version_transition: Some(vec![NoncurrentVersionTransition { + newer_noncurrent_versions: Some(30), + noncurrent_days: Some(30), + storage_class: Some("GLACIER".to_string()), + }]), + status: "Enabled".to_string(), + transition: 
Some(vec![Transition { + date: Some("2024-06-017".to_string()), + days: Some(30), + storage_class: Some("GLACIER".to_string()), + }]), + }; + + let config = BucketLifecycleConfiguration { rules: vec![rule] }; + + let se = quick_xml::se::to_string(&config).unwrap(); + assert_eq!( + se, + r#"302024-06-01730true1050lala30303030GLACIEREnabled2024-06-01730GLACIER"# + ) + } }