diff --git a/aws-s3-transfer-manager/src/operation/upload.rs b/aws-s3-transfer-manager/src/operation/upload.rs
index fc98693..0021db0 100644
--- a/aws-s3-transfer-manager/src/operation/upload.rs
+++ b/aws-s3-transfer-manager/src/operation/upload.rs
@@ -161,14 +161,14 @@ async fn try_start_mpu_upload(
         mpu.upload_id
     );
     let upload_id = mpu.upload_id.clone().expect("upload_id is present");
-
     let mut mpu_data = MultipartUploadData {
         upload_part_tasks: Default::default(),
         read_body_tasks: Default::default(),
         response: Some(mpu),
         upload_id: upload_id.clone(),
     };
-    distribute_work(&mut mpu_data, ctx, stream, part_size, upload_id)?;
+
+    distribute_work(&mut mpu_data, ctx, stream, part_size)?;
 
     Ok(UploadType::MultipartUpload(mpu_data))
 }
diff --git a/aws-s3-transfer-manager/src/operation/upload/service.rs b/aws-s3-transfer-manager/src/operation/upload/service.rs
index 0113876..10fc24d 100644
--- a/aws-s3-transfer-manager/src/operation/upload/service.rs
+++ b/aws-s3-transfer-manager/src/operation/upload/service.rs
@@ -101,7 +101,6 @@ pub(super) fn distribute_work(
     ctx: UploadContext,
     stream: InputStream,
     part_size: u64,
-    upload_id: String,
 ) -> Result<(), error::Error> {
     let part_reader = Arc::new(
         PartReaderBuilder::new()
@@ -135,7 +134,7 @@ pub(super) fn distribute_work(
         let worker = read_body(
             part_reader.clone(),
             ctx.clone(),
-            upload_id.clone(),
+            mpu_data.upload_id.clone(),
             svc.clone(),
             mpu_data.upload_part_tasks.clone(),
             parent_span_for_upload_tasks.clone(),