Add test for canceling upload object via MPU
This commit addresses #75 (comment)
ysaito1001 committed Nov 29, 2024
1 parent a477ede commit 43debc1
Showing 2 changed files with 62 additions and 3 deletions.
aws-s3-transfer-manager/src/config.rs (1 addition & 1 deletion)
@@ -10,7 +10,7 @@ use std::cmp;
pub(crate) mod loader;

/// Minimum upload part size in bytes
-const MIN_MULTIPART_PART_SIZE_BYTES: u64 = 5 * ByteUnit::Mebibyte.as_bytes_u64();
+pub(crate) const MIN_MULTIPART_PART_SIZE_BYTES: u64 = 5 * ByteUnit::Mebibyte.as_bytes_u64();

/// Configuration for a [`Client`](crate::client::Client)
#[derive(Debug, Clone)]
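Note: the visibility change above (const to pub(crate) const) exists so the new test in worker.rs can reference the constant when sizing its payload. A minimal standalone sketch of the size arithmetic the test relies on, assuming only the 5 MiB value mirrored from config.rs (not part of this diff):

fn main() {
    // Mirrors MIN_MULTIPART_PART_SIZE_BYTES from config.rs: 5 MiB is the
    // smallest part size S3 accepts for multipart uploads.
    const MIN_MULTIPART_PART_SIZE_BYTES: u64 = 5 * 1024 * 1024;

    // The new test sets the multipart threshold to exactly this value and
    // builds a payload of the same size, so the upload is expected to take
    // the multipart path (CreateMultipartUpload) rather than PutObject.
    let contents = vec![0u8; MIN_MULTIPART_PART_SIZE_BYTES as usize];
    assert_eq!(contents.len() as u64, MIN_MULTIPART_PART_SIZE_BYTES);
}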
aws-s3-transfer-manager/src/operation/upload_objects/worker.rs (61 additions & 2 deletions)
@@ -329,18 +329,23 @@ fn handle_failed_upload(

#[cfg(test)]
mod tests {
-    use aws_sdk_s3::operation::put_object::PutObjectOutput;
+    use aws_sdk_s3::operation::{
+        abort_multipart_upload::AbortMultipartUploadOutput,
+        create_multipart_upload::CreateMultipartUploadOutput, put_object::PutObjectOutput,
+    };
    use aws_smithy_mocks_experimental::{mock, mock_client, RuleMode};
    use bytes::Bytes;

    use crate::{
        client::Handle,
+        config::MIN_MULTIPART_PART_SIZE_BYTES,
        io::InputStream,
        operation::upload_objects::{
            worker::{upload_single_obj, UploadObjectJob},
            UploadObjectsContext, UploadObjectsInputBuilder,
        },
        runtime::scheduler::Scheduler,
+        types::PartSize,
        DEFAULT_CONCURRENCY,
    };

@@ -705,7 +710,7 @@ mod tests {
    }

    #[tokio::test]
-    async fn test_cancel_single_upload() {
+    async fn test_cancel_single_upload_via_put_object() {
        let bucket = "doesnotmatter";
        let put_object = mock!(aws_sdk_s3::Client::put_object)
            .match_requests(move |input| input.bucket() == Some(bucket))
@@ -734,4 +739,58 @@

        assert_eq!(&crate::error::ErrorKind::OperationCancelled, err.kind());
    }
+
+    #[tokio::test]
+    async fn test_cancel_single_upload_via_multipart_upload() {
+        let bucket = "test-bucket";
+        let key = "test-key";
+        let upload_id: String = "test-upload-id".to_owned();
+
+        let create_mpu = mock!(aws_sdk_s3::Client::create_multipart_upload).then_output({
+            let upload_id = upload_id.clone();
+            move || {
+                CreateMultipartUploadOutput::builder()
+                    .upload_id(upload_id.clone())
+                    .build()
+            }
+        });
+        let abort_mpu = mock!(aws_sdk_s3::Client::abort_multipart_upload)
+            .match_requests({
+                let upload_id = upload_id.clone();
+                move |input| {
+                    input.upload_id.as_ref() == Some(&upload_id)
+                        && input.bucket() == Some(bucket)
+                        && input.key() == Some(key)
+                }
+            })
+            .then_output(|| AbortMultipartUploadOutput::builder().build());
+        let s3_client = mock_client!(aws_sdk_s3, RuleMode::Sequential, &[create_mpu, abort_mpu]);
+        let config = crate::Config::builder()
+            .set_multipart_threshold(PartSize::Target(MIN_MULTIPART_PART_SIZE_BYTES))
+            .client(s3_client)
+            .build();
+
+        let scheduler = Scheduler::new(DEFAULT_CONCURRENCY);
+
+        let handle = std::sync::Arc::new(Handle { config, scheduler });
+        let input = UploadObjectsInputBuilder::default()
+            .source("doesnotmatter")
+            .bucket(bucket)
+            .build()
+            .unwrap();
+
+        // specify the size of the contents so it triggers multipart upload
+        let contents = vec![0; MIN_MULTIPART_PART_SIZE_BYTES as usize];
+        let ctx = UploadObjectsContext::new(handle, input);
+        let job = UploadObjectJob {
+            object: InputStream::from(Bytes::from_static(Box::leak(contents.into_boxed_slice()))),
+            key: key.to_owned(),
+        };
+
+        ctx.state.cancel_tx.send(true).unwrap();
+
+        let err = upload_single_obj(&ctx, job).await.unwrap_err();
+
+        assert_eq!(&crate::error::ErrorKind::OperationCancelled, err.kind());
+    }
}
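A note on the mocking pattern: the new test registers one rule per S3 call it expects (CreateMultipartUpload, then AbortMultipartUpload) and sends the cancellation signal through cancel_tx before invoking upload_single_obj, so the operation observes the cancellation after creating the MPU and aborts it instead of uploading parts. A minimal sketch of the same mock setup, using only constructs that already appear in this diff (the bucket name is a placeholder):

use aws_sdk_s3::operation::put_object::PutObjectOutput;
use aws_smithy_mocks_experimental::{mock, mock_client, RuleMode};

fn main() {
    // One rule: a PutObject request against "my-bucket" returns an empty output.
    let put_object = mock!(aws_sdk_s3::Client::put_object)
        .match_requests(|input| input.bucket() == Some("my-bucket"))
        .then_output(|| PutObjectOutput::builder().build());

    // RuleMode::Sequential replays the rules in the order given; the MPU test
    // above uses two rules to pin down the expected call order, create
    // followed by abort.
    let _s3_client = mock_client!(aws_sdk_s3, RuleMode::Sequential, &[put_object]);
    // The mock client is then handed to crate::Config::builder().client(...),
    // exactly as in the tests above.
}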
