diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2c96eb1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +target/ +Cargo.lock diff --git a/aws-s3-transfer-manager/.cargo/config.toml b/aws-s3-transfer-manager/.cargo/config.toml new file mode 100644 index 0000000..de03f0d --- /dev/null +++ b/aws-s3-transfer-manager/.cargo/config.toml @@ -0,0 +1,6 @@ +[build] +rustflags = ["--cfg", "tokio_unstable"] + +[profile.profiling] +inherits = "release" +debug = true diff --git a/aws-s3-transfer-manager/.gitignore b/aws-s3-transfer-manager/.gitignore new file mode 100644 index 0000000..acdb40a --- /dev/null +++ b/aws-s3-transfer-manager/.gitignore @@ -0,0 +1,2 @@ +flamegraph.svg +profile.json diff --git a/aws-s3-transfer-manager/Cargo.toml b/aws-s3-transfer-manager/Cargo.toml new file mode 100644 index 0000000..d6570a1 --- /dev/null +++ b/aws-s3-transfer-manager/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "aws-s3-transfer-manager" +version = "0.1.0" +edition = "2021" +authors = ["AWS Rust SDK Team ", "Aaron Todd "] +description = "S3 Transfer Manager" +license = "Apache-2.0" +repository = "https://github.com/smithy-lang/smithy-rs" +publish = false + +[dependencies] +async-channel = "2.3.1" +async-trait = "0.1.81" +aws-sdk-s3 = { version = "1.40.0", features = ["behavior-version-latest", "test-util"] } +aws-smithy-http = "0.60.9" +aws-smithy-runtime-api = "1.7.1" +aws-smithy-types = "1.2.0" +aws-types = "1.3.3" +bytes = "1" +# FIXME - upgrade to hyper 1.x +hyper = { version = "0.14.29", features = ["client"] } +thiserror = "1.0.61" +tokio = { version = "1.38.0", features = ["rt-multi-thread", "io-util", "sync", "fs", "macros"] } +tracing = "0.1" + +[dev-dependencies] +aws-config = { version = "1.5.4", features = ["behavior-version-latest"] } +aws-smithy-mocks-experimental = "0.2.1" +clap = { version = "4.5.7", default-features = false, features = ["derive", "std", "help"] } +console-subscriber = "0.3.0" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tempfile = "3.10.1" diff --git a/aws-s3-transfer-manager/README.md b/aws-s3-transfer-manager/README.md new file mode 100644 index 0000000..282b908 --- /dev/null +++ b/aws-s3-transfer-manager/README.md @@ -0,0 +1,57 @@ +# AWS S3 Transfer Manager + +A high-performance Amazon S3 client. + + +## Development + +**Run all tests** + +```sh +cargo test --all-features +``` + +**Run individual test** + +```sh +cargo test --lib download::worker::tests::test_distribute_work +``` + +### Examples + +NOTE: You can use the `profiling` profile from `.cargo/config.toml` to enable release with debug info for any example. + +**Copy** + +See all options: +```sh +cargo run --example cp -- -h +``` + +**Download a file from S3** + +```sh +AWS_PROFILE=<profile-name> RUST_LOG=trace cargo run --example cp s3://<bucket>/<key> /local/path/<filename> +``` + +NOTE: To run in release mode add `--release/-r` to the command; see `cargo run -h`. +NOTE: `trace` may be too verbose; you can see just this library's logs with `RUST_LOG=aws_s3_transfer_manager=trace`. + +#### Flamegraphs + +See [cargo-flamegraph](https://github.com/flamegraph-rs/flamegraph) for more prerequisites and installation information.
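+ +If you don't already have it, the `cargo flamegraph` subcommand can typically be installed with: + +```sh +cargo install flamegraph +```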
+ +Generate a flamegraph (default is to output to `flamegraph.svg`): + +```sh +sudo AWS_PROFILE= RUST_LOG=aws_s3_transfer_manager=info cargo flamegraph --profile profiling --example cp -- s3://test-sdk-rust-aaron/mb-128.dat /tmp/mb-128.dat +``` + +#### Using tokio-console + +Examples use [`console-subscriber`](https://crates.io/crates/console-subscriber) which allows you to run them with +[tokio-console](https://github.com/tokio-rs/console) to help debug task execution. + + +Follow installation instructions for [tokio-console](https://github.com/tokio-rs/console) and then run the +example with `tokio-console` running. diff --git a/aws-s3-transfer-manager/examples/cp.rs b/aws-s3-transfer-manager/examples/cp.rs new file mode 100644 index 0000000..a83cd16 --- /dev/null +++ b/aws-s3-transfer-manager/examples/cp.rs @@ -0,0 +1,212 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ +use std::error::Error; +use std::path::PathBuf; +use std::str::FromStr; +use std::{mem, time}; + +use aws_s3_transfer_manager::download::Downloader; + +use aws_s3_transfer_manager::download::body::Body; +use aws_sdk_s3::operation::get_object::builders::GetObjectInputBuilder; +use aws_types::SdkConfig; +use bytes::Buf; +use clap::{CommandFactory, Parser}; +use tokio::fs; +use tokio::io::AsyncWriteExt; +use tracing::{debug_span, Instrument}; + +type BoxError = Box; + +const ONE_MEBIBYTE: u64 = 1024 * 1024; + +#[derive(Debug, Clone, clap::Parser)] +#[command(name = "cp")] +#[command(about = "Copies a local file or S3 object to another location locally or in S3.")] +pub struct Args { + /// Source to copy from + #[arg(required = true)] + source: TransferUri, + + /// Destination to copy to + #[arg(required = true)] + dest: TransferUri, + + /// Number of concurrent uploads/downloads to perform. + #[arg(long, default_value_t = 8)] + concurrency: usize, + + /// Part size to use + #[arg(long, default_value_t = 8388608)] + part_size: u64, +} + +#[derive(Clone, Debug)] +enum TransferUri { + /// Local filesystem source/destination + Local(PathBuf), + + /// S3 source/destination + S3(S3Uri), +} + +impl TransferUri { + fn expect_s3(&self) -> &S3Uri { + match self { + TransferUri::S3(s3_uri) => s3_uri, + _ => panic!("expected S3Uri"), + } + } + + fn expect_local(&self) -> &PathBuf { + match self { + TransferUri::Local(path) => path, + _ => panic!("expected Local"), + } + } +} + +impl FromStr for TransferUri { + type Err = BoxError; + + fn from_str(s: &str) -> Result { + let uri = if s.starts_with("s3://") { + TransferUri::S3(S3Uri(s.to_owned())) + } else { + let path = PathBuf::from_str(s).unwrap(); + TransferUri::Local(path) + }; + Ok(uri) + } +} + +#[derive(Clone, Debug)] +struct S3Uri(String); + +impl S3Uri { + /// Split the URI into it's component parts '(bucket, key)' + fn parts(&self) -> (&str, &str) { + self.0 + .strip_prefix("s3://") + .expect("valid s3 uri prefix") + .split_once('/') + .expect("invalid s3 uri, missing '/' between bucket and key") + } +} + +fn invalid_arg(message: &str) -> ! 
{ + Args::command() + .error(clap::error::ErrorKind::InvalidValue, message) + .exit() +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + console_subscriber::init(); + let args = dbg!(Args::parse()); + + use TransferUri::*; + match (&args.source, &args.dest) { + (Local(_), S3(_)) => todo!("upload not implemented yet"), + (Local(_), Local(_)) => invalid_arg("local to local transfer not supported"), + (S3(_), Local(_)) => (), + (S3(_), S3(_)) => invalid_arg("s3 to s3 transfer not supported"), + } + + let config = aws_config::from_env().load().await; + + println!("warming up client..."); + warmup(&config).await?; + println!("warming up complete"); + + let tm = Downloader::builder() + .sdk_config(config) + .concurrency(args.concurrency) + .target_part_size(args.part_size) + .build(); + + let (bucket, key) = args.source.expect_s3().parts(); + let input = GetObjectInputBuilder::default().bucket(bucket).key(key); + + let dest = fs::File::create(args.dest.expect_local()).await?; + println!("dest file opened, starting download"); + + let start = time::Instant::now(); + + // TODO(aws-sdk-rust#1159) - rewrite this less naively, + // likely abstract this into performant utils for single file download. Higher level + // TM will handle it's own thread pool for filesystem work + let mut handle = tm.download(input.into()).await?; + let body = mem::replace(&mut handle.body, Body::empty()); + + write_body(body, dest) + .instrument(debug_span!("write-output")) + .await?; + + let elapsed = start.elapsed(); + let obj_size = handle.object_meta.total_size(); + let obj_size_mebibytes = obj_size as f64 / ONE_MEBIBYTE as f64; + + println!( + "downloaded {obj_size} bytes ({obj_size_mebibytes} MiB) in {elapsed:?}; MiB/s: {}", + obj_size_mebibytes / elapsed.as_secs_f64(), + ); + + Ok(()) +} + +// async fn write_body(mut body: Body, mut dest: fs::File) -> Result<(), Box> { +// let b1: &[u8] = &mut []; +// let b2: &[u8] = &mut []; +// let b3: &[u8] = &mut []; +// let b4: &[u8] = &mut []; +// let b5: &[u8] = &mut []; +// let b6: &[u8] = &mut []; +// let b7: &[u8] = &mut []; +// let b8: &[u8] = &mut []; +// while let Some(chunk) = body.next().await { +// let mut chunk = chunk.unwrap(); +// while chunk.has_remaining() { +// let mut dst = [ +// IoSlice::new(b1), +// IoSlice::new(b2), +// IoSlice::new(b3), +// IoSlice::new(b4), +// IoSlice::new(b5), +// IoSlice::new(b6), +// IoSlice::new(b7), +// IoSlice::new(b8), +// ]; +// let filled = chunk.chunks_vectored(&mut dst[..]); +// tracing::trace!("filled: {filled} io slices"); +// +// let wc = dest.write_vectored(&dst[0..filled]).await?; +// tracing::trace!("wrote: {wc} bytes"); +// chunk.advance(wc); +// } +// } +// Ok(()) +// } + +async fn write_body(mut body: Body, mut dest: fs::File) -> Result<(), Box> { + while let Some(chunk) = body.next().await { + let chunk = chunk.unwrap(); + tracing::trace!("recv'd chunk remaining={}", chunk.remaining()); + let mut segment_cnt = 1; + for segment in chunk.into_segments() { + dest.write_all(segment.as_ref()).await?; + tracing::trace!("wrote segment size: {}", segment.remaining()); + segment_cnt += 1; + } + tracing::trace!("chunk had {segment_cnt} segments"); + } + Ok(()) +} + +async fn warmup(config: &SdkConfig) -> Result<(), Box> { + let s3 = aws_sdk_s3::Client::new(&config); + s3.list_buckets().send().await?; + Ok(()) +} diff --git a/aws-s3-transfer-manager/external-types.toml b/aws-s3-transfer-manager/external-types.toml new file mode 100644 index 0000000..d0b12ae --- /dev/null +++ 
b/aws-s3-transfer-manager/external-types.toml @@ -0,0 +1,8 @@ +allowed_external_types = [ + "aws_sdk_s3::operation::get_object::builders::GetObjectFluentBuilder", + "aws_sdk_s3::operation::get_object::_get_object_input::GetObjectInputBuilder", + "aws_sdk_s3::operation::get_object::GetObjectError", + "aws_sdk_s3::operation::head_object::HeadObjectError", + "aws_smithy_runtime_api::*", + "aws_smithy_types::*", +] diff --git a/aws-s3-transfer-manager/src/download.rs b/aws-s3-transfer-manager/src/download.rs new file mode 100644 index 0000000..85faed5 --- /dev/null +++ b/aws-s3-transfer-manager/src/download.rs @@ -0,0 +1,223 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +/// Abstractions for response bodies and consuming data streams. +pub mod body; +mod context; +mod discovery; +mod handle; +mod header; +mod object_meta; +mod worker; + +use crate::download::body::Body; +use crate::download::discovery::{discover_obj, ObjectDiscovery}; +use crate::download::handle::DownloadHandle; +use crate::download::worker::{distribute_work, download_chunks, ChunkResponse}; +use crate::error::TransferError; +use crate::MEBIBYTE; +use aws_sdk_s3::operation::get_object::builders::{GetObjectFluentBuilder, GetObjectInputBuilder}; +use aws_types::SdkConfig; +use context::DownloadContext; +use tokio::sync::mpsc; +use tokio::task::JoinSet; +use tracing::Instrument; + +// TODO(aws-sdk-rust#1159) - need to set User-Agent header value for SEP, e.g. `ft/hll#s3-transfer` + +/// Request type for downloading a single object +#[derive(Debug)] +#[non_exhaustive] +pub struct DownloadRequest { + pub(crate) input: GetObjectInputBuilder, +} + +// FIXME - should probably be TryFrom since checksums may conflict? +impl From<GetObjectFluentBuilder> for DownloadRequest { + fn from(value: GetObjectFluentBuilder) -> Self { + Self { + input: value.as_input().clone(), + } + } +} + +impl From<GetObjectInputBuilder> for DownloadRequest { + fn from(value: GetObjectInputBuilder) -> Self { + Self { input: value } + } +} + +/// Fluent style builder for [Downloader] +#[derive(Debug, Clone)] +pub struct Builder { + target_part_size_bytes: u64, + // TODO(design): should we instead consider an enum here that allows for not only explicit values but also + // an "Auto" mode that allows us to control the concurrency actually used based on overall transfer and part size? + concurrency: usize, + sdk_config: Option<SdkConfig>, +} + +impl Builder { + fn new() -> Self { + Self { + target_part_size_bytes: 8 * MEBIBYTE, + concurrency: 8, + sdk_config: None, + } + } + + /// Size of parts the object will be downloaded in, in bytes. + /// + /// Default is 8 MiB. + pub fn target_part_size(mut self, size_bytes: u64) -> Self { + self.target_part_size_bytes = size_bytes; + self + } + + /// Set the configuration used by the S3 client + pub fn sdk_config(mut self, config: SdkConfig) -> Self { + self.sdk_config = Some(config); + self + } + + /// Set the concurrency level this component is allowed to use. + /// + /// This sets the maximum number of concurrent in-flight requests. + /// Default is 8.
+ pub fn concurrency(mut self, concurrency: usize) -> Self { + self.concurrency = concurrency; + self + } + + /// Consumes the builder and constructs a [Downloader] + pub fn build(self) -> Downloader { + self.into() + } +} + +impl From for Downloader { + fn from(value: Builder) -> Self { + let sdk_config = value + .sdk_config + .unwrap_or_else(|| SdkConfig::builder().build()); + let client = aws_sdk_s3::Client::new(&sdk_config); + Self { + target_part_size_bytes: value.target_part_size_bytes, + concurrency: value.concurrency, + client, + } + } +} + +/// Download an object in the most efficient way possible by splitting the request into +/// concurrent requests (e.g. using ranged GET or part number). +#[derive(Debug, Clone)] +pub struct Downloader { + target_part_size_bytes: u64, + concurrency: usize, + client: aws_sdk_s3::client::Client, +} + +impl Downloader { + /// Create a new [Builder] + pub fn builder() -> Builder { + Builder::new() + } + + /// Download a single object from S3. + /// + /// A single logical request may be split into many concurrent ranged `GetObject` requests + /// to improve throughput. + /// + /// # Examples + /// + /// ```no_run + /// use std::error::Error; + /// use aws_sdk_s3::operation::get_object::builders::GetObjectInputBuilder; + /// use aws_s3_transfer_manager::download::{Downloader, DownloadRequest}; + /// + /// async fn get_object(client: Downloader) -> Result<(), Box> { + /// let request = GetObjectInputBuilder::default() + /// .bucket("my-bucket") + /// .key("my-key") + /// .into(); + /// + /// let handle = client.download(request).await?; + /// // process data off handle... + /// Ok(()) + /// } + /// ``` + pub async fn download(&self, req: DownloadRequest) -> Result { + // if there is a part number then just send the default request + if req.input.get_part_number().is_some() { + todo!("single part download not implemented") + } + + let ctx = DownloadContext { + client: self.client.clone(), + target_part_size: self.target_part_size_bytes, + }; + + // make initial discovery about the object size, metadata, possibly first chunk + let mut discovery = discover_obj(&ctx, &req).await?; + let (comp_tx, comp_rx) = mpsc::channel(self.concurrency); + let start_seq = handle_discovery_chunk(&mut discovery, &comp_tx).await; + + // spawn all work into the same JoinSet such that when the set is dropped all tasks are cancelled. + let mut tasks = JoinSet::new(); + + if !discovery.remaining.is_empty() { + // start assigning work + let (work_tx, work_rx) = async_channel::bounded(self.concurrency); + let input = req.input.clone(); + let part_size = self.target_part_size_bytes; + let rem = discovery.remaining.clone(); + + // TODO(aws-sdk-rust#1159) - test semaphore based approach where we create all futures at once, + // the downside is controlling memory usage as a large download may result in + // quite a few futures created. If more performant could be enabled for + // objects less than some size. + + tasks.spawn(distribute_work(rem, input, part_size, start_seq, work_tx)); + + for i in 0..self.concurrency { + let worker = download_chunks(ctx.clone(), work_rx.clone(), comp_tx.clone()) + .instrument(tracing::debug_span!("chunk-downloader", worker = i)); + tasks.spawn(worker); + } + } + + // Drop our half of the completion channel. When all workers drop theirs, the channel is closed. + drop(comp_tx); + + let handle = DownloadHandle { + // FIXME(aws-sdk-rust#1159) - initial object discovery for a range/first-part will not + // have the correct metadata w.r.t. 
content-length and maybe others for the whole object. + object_meta: discovery.meta, + body: Body::new(comp_rx), + _tasks: tasks, + }; + + Ok(handle) + } +} + +/// Handle possibly sending the first chunk of data received through discovery. Returns +/// the starting sequence number to use for remaining chunks. +async fn handle_discovery_chunk( + discovery: &mut ObjectDiscovery, + completed: &mpsc::Sender>, +) -> u64 { + let mut start_seq = 0; + if let Some(initial_data) = discovery.initial_chunk.take() { + let chunk = ChunkResponse { + seq: start_seq, + data: Some(initial_data), + }; + completed.send(Ok(chunk)).await.expect("initial chunk"); + start_seq = 1; + } + start_seq +} diff --git a/aws-s3-transfer-manager/src/download/body.rs b/aws-s3-transfer-manager/src/download/body.rs new file mode 100644 index 0000000..3cb37f0 --- /dev/null +++ b/aws-s3-transfer-manager/src/download/body.rs @@ -0,0 +1,242 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ +use crate::download::worker::ChunkResponse; +use crate::error::TransferError; +use aws_smithy_types::byte_stream::AggregatedBytes; +use std::cmp; +use std::cmp::Ordering; +use std::collections::BinaryHeap; +use tokio::sync::mpsc; + +/// Stream of binary data representing an Amazon S3 Object's contents. +/// +/// Wraps potentially multiple streams of binary data into a single coherent stream. +/// The data on this stream is sequenced into the correct order. +#[derive(Debug)] +pub struct Body { + inner: UnorderedBody, + sequencer: Sequencer, +} + +type BodyChannel = mpsc::Receiver>; + +impl Body { + /// Create a new empty Body + pub fn empty() -> Self { + Self::new_from_channel(None) + } + + pub(crate) fn new(chunks: BodyChannel) -> Self { + Self::new_from_channel(Some(chunks)) + } + + fn new_from_channel(chunks: Option) -> Self { + Self { + inner: UnorderedBody::new(chunks), + sequencer: Sequencer::new(), + } + } + + /// Convert this body into an unordered stream of chunks. + // TODO(aws-sdk-rust#1159) - revisit if we actually need/use unordered data stream + #[allow(dead_code)] + pub(crate) fn unordered(self) -> UnorderedBody { + self.inner + } + + /// Pull the next chunk of data off the stream. + /// + /// Returns [None] when there is no more data. + /// Chunks returned from a [Body] are guaranteed to be sequenced + /// in the right order. + pub async fn next(&mut self) -> Option> { + // TODO(aws-sdk-rust#1159, design) - do we want ChunkResponse (or similar) rather than AggregatedBytes? 
Would + // make additional retries of an individual chunk/part more feasible (though theoretically already exhausted retries) + loop { + if self.sequencer.is_ordered() { + break; + } + + match self.inner.next().await { + None => break, + Some(Ok(chunk)) => self.sequencer.push(chunk), + Some(Err(err)) => return Some(Err(err)), + } + } + + let chunk = self + .sequencer + .pop() + .map(|r| Ok(r.data.expect("chunk data"))); + + if chunk.is_some() { + // if we actually pulled data out, advance the next sequence we expect + self.sequencer.advance(); + } + + chunk + } +} + +#[derive(Debug)] +struct Sequencer { + /// next expected sequence + next_seq: u64, + chunks: BinaryHeap>, +} + +impl Sequencer { + fn new() -> Self { + Self { + chunks: BinaryHeap::with_capacity(8), + next_seq: 0, + } + } + + fn push(&mut self, chunk: ChunkResponse) { + self.chunks.push(cmp::Reverse(SequencedChunk(chunk))) + } + + fn pop(&mut self) -> Option { + self.chunks.pop().map(|c| c.0 .0) + } + + fn is_ordered(&self) -> bool { + let next = self.peek(); + if next.is_none() { + return false; + } + + next.unwrap().seq == self.next_seq + } + + fn peek(&self) -> Option<&ChunkResponse> { + self.chunks.peek().map(|c| &c.0 .0) + } + + fn advance(&mut self) { + self.next_seq += 1 + } +} + +#[derive(Debug)] +struct SequencedChunk(ChunkResponse); + +impl Ord for SequencedChunk { + fn cmp(&self, other: &Self) -> Ordering { + self.0.seq.cmp(&other.0.seq) + } +} + +impl PartialOrd for SequencedChunk { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Eq for SequencedChunk {} +impl PartialEq for SequencedChunk { + fn eq(&self, other: &Self) -> bool { + self.0.seq == other.0.seq + } +} + +/// A body that returns chunks in whatever order they are received. +#[derive(Debug)] +pub(crate) struct UnorderedBody { + chunks: Option>>, +} + +impl UnorderedBody { + fn new(chunks: Option) -> Self { + Self { chunks } + } + + /// Pull the next chunk of data off the stream. + /// + /// Returns [None] when there is no more data. + /// Chunks returned from an [UnorderedBody] are not guaranteed to be sorted + /// in the right order. Consumers are expected to sort the data themselves + /// using the chunk sequence number (starting from zero). 
+ pub(crate) async fn next(&mut self) -> Option> { + match self.chunks.as_mut() { + None => None, + Some(ch) => ch.recv().await, + } + } +} + +#[cfg(test)] +mod tests { + use crate::download::worker::ChunkResponse; + use crate::error::TransferError; + use aws_smithy_types::byte_stream::{AggregatedBytes, ByteStream}; + use bytes::Bytes; + use tokio::sync::mpsc; + + use super::{Body, Sequencer}; + + fn chunk_resp(seq: u64, data: Option) -> ChunkResponse { + ChunkResponse { seq, data } + } + + #[test] + fn test_sequencer() { + let mut sequencer = Sequencer::new(); + sequencer.push(chunk_resp(1, None)); + sequencer.push(chunk_resp(2, None)); + assert_eq!(sequencer.peek().unwrap().seq, 1); + sequencer.push(chunk_resp(0, None)); + assert_eq!(sequencer.pop().unwrap().seq, 0); + } + + #[tokio::test] + async fn test_body_next() { + let (tx, rx) = mpsc::channel(2); + let mut body = Body::new(rx); + tokio::spawn(async move { + let seq = vec![2, 0, 1]; + for i in seq { + let data = Bytes::from(format!("chunk {i}")); + let aggregated = ByteStream::from(data).collect().await.unwrap(); + let chunk = chunk_resp(i as u64, Some(aggregated)); + tx.send(Ok(chunk)).await.unwrap(); + } + }); + + let mut received = Vec::new(); + while let Some(chunk) = body.next().await { + let chunk = chunk.expect("chunk ok"); + let data = String::from_utf8(chunk.to_vec()).unwrap(); + received.push(data); + } + + let expected: Vec = vec![0, 1, 2].iter().map(|i| format!("chunk {i}")).collect(); + assert_eq!(expected, received); + } + + #[tokio::test] + async fn test_body_next_error() { + let (tx, rx) = mpsc::channel(2); + let mut body = Body::new(rx); + tokio::spawn(async move { + let data = Bytes::from("chunk 0".to_string()); + let aggregated = ByteStream::from(data).collect().await.unwrap(); + let chunk = chunk_resp(0, Some(aggregated)); + tx.send(Ok(chunk)).await.unwrap(); + let err = TransferError::InvalidMetaRequest("test errors".to_string()); + tx.send(Err(err)).await.unwrap(); + }); + + let mut received = Vec::new(); + while let Some(chunk) = body.next().await { + received.push(chunk); + } + + assert_eq!(2, received.len()); + received.pop().unwrap().expect_err("error propagated"); + received.pop().unwrap().expect("chunk 0 successful"); + } +} diff --git a/aws-s3-transfer-manager/src/download/context.rs b/aws-s3-transfer-manager/src/download/context.rs new file mode 100644 index 0000000..d8f3167 --- /dev/null +++ b/aws-s3-transfer-manager/src/download/context.rs @@ -0,0 +1,11 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +/// Shared context used across a single download request +#[derive(Debug, Clone)] +pub(crate) struct DownloadContext { + pub(crate) client: aws_sdk_s3::Client, + pub(crate) target_part_size: u64, +} diff --git a/aws-s3-transfer-manager/src/download/discovery.rs b/aws-s3-transfer-manager/src/download/discovery.rs new file mode 100644 index 0000000..1f54a7a --- /dev/null +++ b/aws-s3-transfer-manager/src/download/discovery.rs @@ -0,0 +1,320 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +use std::ops::RangeInclusive; +use std::str::FromStr; +use std::{cmp, mem}; + +use aws_sdk_s3::operation::get_object::builders::GetObjectInputBuilder; +use aws_smithy_types::body::SdkBody; +use aws_smithy_types::byte_stream::{AggregatedBytes, ByteStream}; +use bytes::Buf; + +use crate::error; + +use super::header::{self, ByteRange}; +use super::object_meta::ObjectMetadata; +use super::DownloadRequest; +use crate::download::context::DownloadContext; + +#[derive(Debug, Clone, PartialEq)] +enum ObjectDiscoveryStrategy { + // Send a `HeadObject` request. + // The overall transfer is optionally constrained to the given range. + HeadObject(Option), + // Send `GetObject` request using a ranged get. + // The overall transfer is optionally constrained to the given range. + RangedGet(Option>), +} + +/// Discovered object metadata (optionally with first chunk of data) +#[derive(Debug)] +pub(super) struct ObjectDiscovery { + /// range of data remaining to be fetched + pub(super) remaining: RangeInclusive, + + /// the discovered metadata + pub(super) meta: ObjectMetadata, + + /// the first chunk of data if fetched during discovery + pub(super) initial_chunk: Option, +} + +impl ObjectDiscoveryStrategy { + fn from_request( + request: &DownloadRequest, + ) -> Result { + let strategy = match request.input.get_range() { + Some(h) => { + let byte_range = header::Range::from_str(h)?.0; + match byte_range { + ByteRange::Inclusive(start, end) => { + ObjectDiscoveryStrategy::RangedGet(Some(start..=end)) + } + // TODO(aws-sdk-rust#1159): explore when given a start range what it would like to just start + // sending requests from [start, start+part_size] + _ => ObjectDiscoveryStrategy::HeadObject(Some(byte_range)), + } + } + None => ObjectDiscoveryStrategy::RangedGet(None), + }; + + Ok(strategy) + } +} + +/// Discover metadata about an object. +/// +///Returns object metadata, the remaining range of data +/// to be fetched, and _(if available)_ the first chunk of data. +pub(super) async fn discover_obj( + ctx: &DownloadContext, + request: &DownloadRequest, +) -> Result { + let strategy = ObjectDiscoveryStrategy::from_request(request)?; + match strategy { + ObjectDiscoveryStrategy::HeadObject(byte_range) => { + discover_obj_with_head(ctx, request, byte_range).await + } + ObjectDiscoveryStrategy::RangedGet(range) => { + let byte_range = match range.as_ref() { + Some(r) => ByteRange::Inclusive( + *r.start(), + cmp::min(*r.start() + ctx.target_part_size - 1, *r.end()), + ), + None => ByteRange::Inclusive(0, ctx.target_part_size - 1), + }; + let r = request + .input + .clone() + .set_part_number(None) + .range(header::Range::bytes(byte_range)); + + discover_obj_with_get(ctx, r, range).await + } + } +} + +async fn discover_obj_with_head( + ctx: &DownloadContext, + request: &DownloadRequest, + byte_range: Option, +) -> Result { + let meta: ObjectMetadata = ctx + .client + .head_object() + .set_bucket(request.input.get_bucket().clone()) + .set_key(request.input.get_key().clone()) + .send() + .await + .map_err(|e| error::DownloadError::DiscoverFailed(e.into()))? 
+ .into(); + + let remaining = match byte_range { + Some(range) => match range { + ByteRange::Inclusive(start, end) => start..=end, + ByteRange::AllFrom(start) => start..=meta.total_size(), + ByteRange::Last(n) => (meta.total_size() - n + 1)..=meta.total_size(), + }, + None => 0..=meta.total_size(), + }; + + Ok(ObjectDiscovery { + remaining, + meta, + initial_chunk: None, + }) +} + +async fn discover_obj_with_get( + ctx: &DownloadContext, + request: GetObjectInputBuilder, + range: Option>, +) -> Result { + let resp = request.send_with(&ctx.client).await; + + if resp.is_err() { + // TODO(aws-sdk-rust#1159) - deal with empty file errors, see https://github.com/awslabs/aws-c-s3/blob/v0.5.7/source/s3_auto_ranged_get.c#L147-L153 + } + + let mut resp = resp.map_err(|e| error::DownloadError::DiscoverFailed(e.into()))?; + + // take the body so we can convert the metadata + let empty_stream = ByteStream::new(SdkBody::empty()); + let body = mem::replace(&mut resp.body, empty_stream); + + let data = body + .collect() + .await + .map_err(|e| error::DownloadError::DiscoverFailed(e.into()))?; + + let meta: ObjectMetadata = resp.into(); + + let remaining = match range { + Some(range) => (*range.start() + data.remaining() as u64)..=*range.end(), + None => (data.remaining() as u64)..=meta.total_size() - 1, + }; + + Ok(ObjectDiscovery { + remaining, + meta, + initial_chunk: Some(data), + }) +} + +#[cfg(test)] +mod tests { + use crate::download::context::DownloadContext; + use crate::download::discovery::{ + discover_obj, discover_obj_with_head, ObjectDiscoveryStrategy, + }; + use crate::download::header::ByteRange; + use crate::MEBIBYTE; + use aws_sdk_s3::operation::get_object::{GetObjectInput, GetObjectOutput}; + use aws_sdk_s3::operation::head_object::HeadObjectOutput; + use aws_sdk_s3::Client; + use aws_smithy_mocks_experimental::{mock, mock_client}; + use aws_smithy_types::byte_stream::ByteStream; + use bytes::Buf; + + use super::ObjectDiscovery; + + fn strategy_from_range(range: Option<&str>) -> ObjectDiscoveryStrategy { + let req = GetObjectInput::builder() + .set_range(range.map(|r| r.to_string())) + .into(); + ObjectDiscoveryStrategy::from_request(&req).unwrap() + } + + #[test] + fn test_strategy_from_req() { + assert_eq!( + ObjectDiscoveryStrategy::RangedGet(None), + strategy_from_range(None) + ); + + assert_eq!( + ObjectDiscoveryStrategy::RangedGet(Some(100..=200)), + strategy_from_range(Some("bytes=100-200")) + ); + assert_eq!( + ObjectDiscoveryStrategy::HeadObject(Some(ByteRange::AllFrom(100))), + strategy_from_range(Some("bytes=100-")) + ); + assert_eq!( + ObjectDiscoveryStrategy::HeadObject(Some(ByteRange::Last(500))), + strategy_from_range(Some("bytes=-500")) + ); + } + + async fn get_discovery_from_head(range: Option) -> ObjectDiscovery { + let head_obj_rule = mock!(Client::head_object) + .then_output(|| HeadObjectOutput::builder().content_length(500).build()); + let client = mock_client!(aws_sdk_s3, &[&head_obj_rule]); + + let ctx = DownloadContext { + client, + target_part_size: 5 * MEBIBYTE, + }; + let request = GetObjectInput::builder() + .bucket("test-bucket") + .key("test-key") + .into(); + + discover_obj_with_head(&ctx, &request, range).await.unwrap() + } + + #[tokio::test] + async fn test_discover_obj_with_head() { + assert_eq!(0..=500, get_discovery_from_head(None).await.remaining); + assert_eq!( + 10..=100, + get_discovery_from_head(Some(ByteRange::Inclusive(10, 100))) + .await + .remaining + ); + assert_eq!( + 100..=500, + get_discovery_from_head(Some(ByteRange::AllFrom(100))) + 
.await + .remaining + ); + assert_eq!( + 401..=500, + get_discovery_from_head(Some(ByteRange::Last(100))) + .await + .remaining + ); + } + + #[tokio::test] + async fn test_discover_obj_with_get_full_range() { + let target_part_size = 500; + let bytes = &[0u8; 500]; + let get_obj_rule = mock!(Client::get_object) + .match_requests(|r| r.range() == Some("bytes=0-499")) + .then_output(|| { + GetObjectOutput::builder() + .content_length(500) + .content_range("0-499/700") + .body(ByteStream::from_static(bytes)) + .build() + }); + let client = mock_client!(aws_sdk_s3, &[&get_obj_rule]); + + let ctx = DownloadContext { + client, + target_part_size, + }; + + let request = GetObjectInput::builder() + .bucket("test-bucket") + .key("test-key") + .into(); + + let discovery = discover_obj(&ctx, &request).await.unwrap(); + assert_eq!(200, discovery.remaining.clone().count()); + assert_eq!(500..=699, discovery.remaining); + assert_eq!( + 500, + discovery.initial_chunk.expect("initial chunk").remaining() + ); + } + + #[tokio::test] + async fn test_discover_obj_with_get_partial_range() { + let target_part_size = 100; + let bytes = &[0u8; 100]; + let get_obj_rule = mock!(Client::get_object) + .match_requests(|r| r.range() == Some("bytes=200-299")) + .then_output(|| { + GetObjectOutput::builder() + .content_length(100) + .content_range("200-299/700") + .body(ByteStream::from_static(bytes)) + .build() + }); + let client = mock_client!(aws_sdk_s3, &[&get_obj_rule]); + + let ctx = DownloadContext { + client, + target_part_size, + }; + + let request = GetObjectInput::builder() + .bucket("test-bucket") + .key("test-key") + .range("bytes=200-499") + .into(); + + let discovery = discover_obj(&ctx, &request).await.unwrap(); + assert_eq!(200, discovery.remaining.clone().count()); + assert_eq!(300..=499, discovery.remaining); + assert_eq!( + 100, + discovery.initial_chunk.expect("initial chunk").remaining() + ); + } +} diff --git a/aws-s3-transfer-manager/src/download/handle.rs b/aws-s3-transfer-manager/src/download/handle.rs new file mode 100644 index 0000000..19b03f6 --- /dev/null +++ b/aws-s3-transfer-manager/src/download/handle.rs @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ +use crate::download::body::Body; +use crate::download::object_meta::ObjectMetadata; +use tokio::task; + +/// Response type for a single download object request. +#[derive(Debug)] +#[non_exhaustive] +pub struct DownloadHandle { + /// Object metadata + pub object_meta: ObjectMetadata, + + /// The object content + pub body: Body, + + /// All child tasks spawned for this download + pub(crate) _tasks: task::JoinSet<()>, +} + +impl DownloadHandle { + /// Object metadata + pub fn object_meta(&self) -> &ObjectMetadata { + &self.object_meta + } + + /// Object content + pub fn body(&self) -> &Body { + &self.body + } +} diff --git a/aws-s3-transfer-manager/src/download/header.rs b/aws-s3-transfer-manager/src/download/header.rs new file mode 100644 index 0000000..53c41f2 --- /dev/null +++ b/aws-s3-transfer-manager/src/download/header.rs @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +use core::fmt; +use std::str::FromStr; + +use crate::error; + +/// Representation of `Range` header. 
+/// NOTE: S3 only supports a single bytes range; this is a simplified representation +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct Range(pub(crate) ByteRange); + +impl Range { + /// Create a range from the given byte range + pub(crate) fn bytes(rng: ByteRange) -> Self { + Self(rng) + } + + /// Create a range from the inclusive start and end offsets + pub(crate) fn bytes_inclusive(start: u64, end: u64) -> Self { + Range::bytes(ByteRange::Inclusive(start, end)) + } +} + +impl fmt::Display for Range { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "bytes={}", self.0) + } +} + +impl From<Range> for String { + fn from(value: Range) -> Self { + format!("{}", value) + } +} + +impl FromStr for Range { + type Err = error::TransferError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let mut iter = s.splitn(2, '='); + match (iter.next(), iter.next()) { + (Some("bytes"), Some(range)) => { + if range.contains(',') { + // TODO(aws-sdk-rust#1159) - error S3 doesn't support multiple byte ranges + Err(error::invalid_meta_request(format!( + "multiple byte ranges not supported for range header {}", + s + ))) + } else { + let spec = ByteRange::from_str(range).map_err(|_| { + error::invalid_meta_request(format!("invalid range header {}", s)) + })?; + Ok(Range(spec)) + } + } + _ => Err(error::invalid_meta_request(format!( + "unsupported byte range header format `{s}`; see https://www.rfc-editor.org/rfc/rfc9110.html#name-range for valid formats" + ))), + } + } +} + +/// Representation of a single [RFC-9110 byte range](https://www.rfc-editor.org/rfc/rfc9110.html#name-byte-ranges) +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum ByteRange { + /// Get all bytes between x and y inclusive ("bytes=x-y") + Inclusive(u64, u64), + + /// Get all bytes starting from x ("bytes=x-") + AllFrom(u64), + + /// Get the last n bytes ("bytes=-n") + Last(u64), +} + +impl fmt::Display for ByteRange { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ByteRange::Inclusive(start, end) => write!(f, "{}-{}", start, end), + ByteRange::AllFrom(from) => write!(f, "{}-", from), + ByteRange::Last(n) => write!(f, "-{}", n), + } + } +} + +impl FromStr for ByteRange { + type Err = (); + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let mut iter = s.splitn(2, '-'); + match (iter.next(), iter.next()) { + (Some(""), Some(end)) => end.parse().map(ByteRange::Last).or(Err(())), + (Some(start), Some("")) => start.parse().map(ByteRange::AllFrom).or(Err(())), + (Some(start), Some(end)) => match (start.parse(), end.parse()) { + (Ok(start), Ok(end)) if start <= end => Ok(ByteRange::Inclusive(start, end)), + _ => Err(()), + }, + _ => Err(()), + } + } +} + +#[cfg(test)] +mod tests { + use super::{ByteRange, Range}; + use crate::error::TransferError; + use std::str::FromStr; + + #[test] + fn test_byte_range_from_str() { + assert_eq!( + ByteRange::Last(500), + Range::from_str("bytes=-500").unwrap().0 + ); + assert_eq!( + ByteRange::AllFrom(200), + Range::from_str("bytes=200-").unwrap().0 + ); + assert_eq!( + ByteRange::Inclusive(200, 500), + Range::from_str("bytes=200-500").unwrap().0 + ); + } + + fn assert_err_contains(r: Result<Range, TransferError>, msg: &str) { + let err = r.unwrap_err(); + match err { + TransferError::InvalidMetaRequest(m) => { + assert!(m.contains(msg), "'{}' does not contain '{}'", m, msg); + } + _ => panic!("unexpected error type"), + } + } + + #[test] + fn test_invalid_byte_range_from_str() { + assert_err_contains(Range::from_str("bytes=-"), "invalid range header");
assert_err_contains(Range::from_str("bytes=500-200"), "invalid range header"); + assert_err_contains( + Range::from_str("bytes=0-200,400-500"), + "multiple byte ranges not supported for range header", + ); + } +} diff --git a/aws-s3-transfer-manager/src/download/object_meta.rs b/aws-s3-transfer-manager/src/download/object_meta.rs new file mode 100644 index 0000000..e742406 --- /dev/null +++ b/aws-s3-transfer-manager/src/download/object_meta.rs @@ -0,0 +1,153 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +use aws_sdk_s3::operation::get_object::GetObjectOutput; +use aws_sdk_s3::operation::head_object::HeadObjectOutput; + +// TODO(aws-sdk-rust#1159,design): how many of these fields should we expose? +// TODO(aws-sdk-rust#1159,docs): Document fields + +/// Object metadata other than the body that can be set from either `GetObject` or `HeadObject` +#[derive(Debug, Clone)] +pub struct ObjectMetadata { + pub delete_marker: Option, + pub accept_ranges: Option, + pub expiration: Option, + pub restore: Option, + pub last_modified: Option<::aws_smithy_types::DateTime>, + pub content_length: Option, + pub e_tag: Option, + pub checksum_crc32: Option, + pub checksum_crc32_c: Option, + pub checksum_sha1: Option, + pub checksum_sha256: Option, + pub missing_meta: Option, + pub version_id: Option, + pub cache_control: Option, + pub content_disposition: Option, + pub content_encoding: Option, + pub content_language: Option, + pub content_range: Option, + pub content_type: Option, + pub expires: Option<::aws_smithy_types::DateTime>, + pub expires_string: Option, + pub website_redirect_location: Option, + pub server_side_encryption: Option, + pub metadata: Option<::std::collections::HashMap>, + pub sse_customer_algorithm: Option, + pub sse_customer_key_md5: Option, + pub ssekms_key_id: Option, + pub bucket_key_enabled: Option, + pub storage_class: Option, + pub request_charged: Option, + pub replication_status: Option, + pub parts_count: Option, + pub tag_count: Option, + pub object_lock_mode: Option, + pub object_lock_retain_until_date: Option<::aws_smithy_types::DateTime>, + pub object_lock_legal_hold_status: Option, +} + +impl ObjectMetadata { + /// The total object size + pub fn total_size(&self) -> u64 { + match (self.content_length, self.content_range.as_ref()) { + (_, Some(range)) => { + let total = range.split_once('/').map(|x| x.1).expect("content range total"); + total.parse().expect("valid range total") + } + (Some(length), None) => length as u64, + (None, None) => panic!("total object size cannot be calculated without either content length or content range headers") + } + } +} + +impl From for ObjectMetadata { + fn from(value: GetObjectOutput) -> Self { + Self { + delete_marker: value.delete_marker, + accept_ranges: value.accept_ranges, + expiration: value.expiration, + restore: value.restore, + last_modified: value.last_modified, + content_length: value.content_length, + e_tag: value.e_tag, + checksum_crc32: value.checksum_crc32, + checksum_crc32_c: value.checksum_crc32_c, + checksum_sha1: value.checksum_sha1, + checksum_sha256: value.checksum_sha256, + missing_meta: value.missing_meta, + version_id: value.version_id, + cache_control: value.cache_control, + content_disposition: value.content_disposition, + content_encoding: value.content_encoding, + content_language: value.content_language, + content_range: value.content_range, + content_type: value.content_type, + #[allow(deprecated)] + expires: value.expires, + 
expires_string: value.expires_string, + website_redirect_location: value.website_redirect_location, + server_side_encryption: value.server_side_encryption, + metadata: value.metadata, + sse_customer_algorithm: value.sse_customer_algorithm, + sse_customer_key_md5: value.sse_customer_key_md5, + ssekms_key_id: value.ssekms_key_id, + bucket_key_enabled: value.bucket_key_enabled, + storage_class: value.storage_class, + request_charged: value.request_charged, + replication_status: value.replication_status, + parts_count: value.parts_count, + tag_count: value.tag_count, + object_lock_mode: value.object_lock_mode, + object_lock_retain_until_date: value.object_lock_retain_until_date, + object_lock_legal_hold_status: value.object_lock_legal_hold_status, + } + } +} + +impl From for ObjectMetadata { + fn from(value: HeadObjectOutput) -> Self { + Self { + delete_marker: value.delete_marker, + accept_ranges: value.accept_ranges, + expiration: value.expiration, + restore: value.restore, + last_modified: value.last_modified, + content_length: value.content_length, + e_tag: value.e_tag, + checksum_crc32: value.checksum_crc32, + checksum_crc32_c: value.checksum_crc32_c, + checksum_sha1: value.checksum_sha1, + checksum_sha256: value.checksum_sha256, + missing_meta: value.missing_meta, + version_id: value.version_id, + cache_control: value.cache_control, + content_disposition: value.content_disposition, + content_encoding: value.content_encoding, + content_language: value.content_language, + content_range: None, + content_type: value.content_type, + #[allow(deprecated)] + expires: value.expires, + expires_string: value.expires_string, + website_redirect_location: value.website_redirect_location, + server_side_encryption: value.server_side_encryption, + metadata: value.metadata, + sse_customer_algorithm: value.sse_customer_algorithm, + sse_customer_key_md5: value.sse_customer_key_md5, + ssekms_key_id: value.ssekms_key_id, + bucket_key_enabled: value.bucket_key_enabled, + storage_class: value.storage_class, + request_charged: value.request_charged, + replication_status: value.replication_status, + parts_count: value.parts_count, + tag_count: None, + object_lock_mode: value.object_lock_mode, + object_lock_retain_until_date: value.object_lock_retain_until_date, + object_lock_legal_hold_status: value.object_lock_legal_hold_status, + } + } +} diff --git a/aws-s3-transfer-manager/src/download/worker.rs b/aws-s3-transfer-manager/src/download/worker.rs new file mode 100644 index 0000000..edd8149 --- /dev/null +++ b/aws-s3-transfer-manager/src/download/worker.rs @@ -0,0 +1,179 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ +use crate::download::context::DownloadContext; +use crate::download::header; +use crate::error; +use crate::error::TransferError; +use aws_sdk_s3::operation::get_object::builders::GetObjectInputBuilder; +use aws_smithy_types::body::SdkBody; +use aws_smithy_types::byte_stream::{AggregatedBytes, ByteStream}; +use std::ops::RangeInclusive; +use std::{cmp, mem}; +use tokio::sync::mpsc; +use tracing::Instrument; + +// FIXME - should probably be enum ChunkRequest { Range(..), Part(..) 
} or have an inner field like such +#[derive(Debug, Clone)] +pub(super) struct ChunkRequest { + // byte range to download + pub(super) range: RangeInclusive, + pub(super) input: GetObjectInputBuilder, + // sequence number + pub(super) seq: u64, +} + +impl ChunkRequest { + /// Size of this chunk request in bytes + pub(super) fn size(&self) -> u64 { + self.range.end() - self.range.start() + 1 + } +} + +#[derive(Debug, Clone)] +pub(crate) struct ChunkResponse { + // TODO(aws-sdk-rust#1159, design) - consider PartialOrd for ChunkResponse and hiding `seq` as internal only detail + // the seq number + pub(crate) seq: u64, + // chunk data + pub(crate) data: Option, +} + +/// Worker function that processes requests from the `requests` channel and +/// sends the result back on the `completed` channel. +pub(super) async fn download_chunks( + ctx: DownloadContext, + requests: async_channel::Receiver, + completed: mpsc::Sender>, +) { + while let Ok(request) = requests.recv().await { + let seq = request.seq; + tracing::trace!("worker recv'd request for chunk seq {seq}"); + + let result = download_chunk(&ctx, request) + .instrument(tracing::debug_span!("download-chunk", seq = seq)) + .await; + + if let Err(err) = completed.send(result).await { + tracing::debug!(error = ?err, "chunk worker send failed"); + return; + } + } + + tracing::trace!("req channel closed, worker finished"); +} + +/// Download an individual chunk of data (range / part) +async fn download_chunk( + ctx: &DownloadContext, + request: ChunkRequest, +) -> Result { + let mut resp = request + .input + .send_with(&ctx.client) + .await + .map_err(error::chunk_failed)?; + + let body = mem::replace(&mut resp.body, ByteStream::new(SdkBody::taken())); + + let bytes = body + .collect() + .instrument(tracing::debug_span!("collect-body", seq = request.seq)) + .await + .map_err(error::chunk_failed)?; + + Ok(ChunkResponse { + seq: request.seq, + data: Some(bytes), + }) +} + +pub(super) async fn distribute_work( + remaining: RangeInclusive, + input: GetObjectInputBuilder, + part_size: u64, + start_seq: u64, + tx: async_channel::Sender, +) { + let end = *remaining.end(); + let mut pos = *remaining.start(); + let mut remaining = end - pos + 1; + let mut seq = start_seq; + + while remaining > 0 { + let start = pos; + let end_inclusive = cmp::min(pos + part_size - 1, end); + + let chunk_req = next_chunk(start, end_inclusive, seq, input.clone()); + tracing::trace!( + "distributing chunk(size={}): {:?}", + chunk_req.size(), + chunk_req + ); + let chunk_size = chunk_req.size(); + tx.send(chunk_req).await.expect("channel open"); + + seq += 1; + remaining -= chunk_size; + tracing::trace!("remaining = {}", remaining); + pos += chunk_size; + } + + tracing::trace!("work fully distributed"); + tx.close(); +} + +fn next_chunk( + start: u64, + end_inclusive: u64, + seq: u64, + input: GetObjectInputBuilder, +) -> ChunkRequest { + let range = start..=end_inclusive; + let input = input.range(header::Range::bytes_inclusive(start, end_inclusive)); + ChunkRequest { seq, range, input } +} + +#[cfg(test)] +mod tests { + use crate::download::header; + use crate::download::worker::distribute_work; + use aws_sdk_s3::operation::get_object::builders::GetObjectInputBuilder; + use std::ops::RangeInclusive; + + #[tokio::test] + async fn test_distribute_work() { + let rem = 0..=90u64; + let part_size = 20; + let input = GetObjectInputBuilder::default(); + let (tx, rx) = async_channel::unbounded(); + + tokio::spawn(distribute_work(rem, input, part_size, 0, tx)); + + let mut chunks = 
Vec::new(); + while let Ok(chunk) = rx.recv().await { + chunks.push(chunk); + } + + let expected_ranges = vec![0..=19u64, 20..=39u64, 40..=59u64, 60..=79u64, 80..=90u64]; + + let actual_ranges: Vec> = + chunks.iter().map(|c| c.range.clone()).collect(); + + assert_eq!(expected_ranges, actual_ranges); + assert!(rx.is_closed()); + + for (i, chunk) in chunks.iter().enumerate() { + assert_eq!(i as u64, chunk.seq); + let expected_range_header = + header::Range::bytes_inclusive(*chunk.range.start(), *chunk.range.end()) + .to_string(); + + assert_eq!( + expected_range_header, + chunk.input.get_range().clone().expect("range header set") + ); + } + } +} diff --git a/aws-s3-transfer-manager/src/error.rs b/aws-s3-transfer-manager/src/error.rs new file mode 100644 index 0000000..0b3b757 --- /dev/null +++ b/aws-s3-transfer-manager/src/error.rs @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +use aws_smithy_types::byte_stream; +use std::io; + +// TODO(design): revisit errors + +/// Failed transfer result +#[derive(thiserror::Error, Debug)] +pub enum TransferError { + /// The request was invalid + #[error("invalid meta request: {0}")] + InvalidMetaRequest(String), + + /// A download failed + #[error("download failed")] + DownloadFailed(#[from] DownloadError), +} + +pub(crate) type GetObjectSdkError = ::aws_smithy_runtime_api::client::result::SdkError< + aws_sdk_s3::operation::get_object::GetObjectError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, +>; +pub(crate) type HeadObjectSdkError = ::aws_smithy_runtime_api::client::result::SdkError< + aws_sdk_s3::operation::head_object::HeadObjectError, + ::aws_smithy_runtime_api::client::orchestrator::HttpResponse, +>; + +/// An error related to downloading an object +#[derive(thiserror::Error, Debug)] +pub enum DownloadError { + /// Discovery of object metadata failed + #[error(transparent)] + DiscoverFailed(SdkOperationError), + + /// A failure occurred fetching a single chunk of the overall object data + #[error("download chunk failed")] + ChunkFailed { + /// The underlying SDK error + source: SdkOperationError, + }, +} + +/// An underlying S3 SDK error +#[derive(thiserror::Error, Debug)] +pub enum SdkOperationError { + /// An error occurred invoking [aws_sdk_s3::Client::head_object] + #[error(transparent)] + HeadObject(#[from] HeadObjectSdkError), + + /// An error occurred invoking [aws_sdk_s3::Client::get_object] + #[error(transparent)] + GetObject(#[from] GetObjectSdkError), + + /// An error occurred reading the underlying data + #[error(transparent)] + ReadError(#[from] byte_stream::error::Error), + + /// An unknown IO error occurred carrying out the request + #[error(transparent)] + IoError(#[from] io::Error), +} + +// convenience to construct a TransferError from a chunk failure +pub(crate) fn chunk_failed>(e: E) -> TransferError { + DownloadError::ChunkFailed { source: e.into() }.into() +} + +pub(crate) fn invalid_meta_request(message: String) -> TransferError { + TransferError::InvalidMetaRequest(message) +} diff --git a/aws-s3-transfer-manager/src/io/error.rs b/aws-s3-transfer-manager/src/io/error.rs new file mode 100644 index 0000000..4b88133 --- /dev/null +++ b/aws-s3-transfer-manager/src/io/error.rs @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ +use std::error::Error as StdError; +use std::fmt; +use std::fmt::Formatter; +use std::io::{Error as StdIoError, ErrorKind as StdIoErrorKind}; +use tokio::task::JoinError; + +#[derive(Debug)] +pub(crate) enum ErrorKind { + UpperBoundSizeHintRequired, + OffsetGreaterThanFileSize, + TaskFailed(JoinError), + IoError(StdIoError), +} + +/// An I/O related error occurred +#[derive(Debug)] +pub struct Error { + kind: ErrorKind, +} + +impl Error { + pub(crate) fn upper_bound_size_hint_required() -> Error { + ErrorKind::UpperBoundSizeHintRequired.into() + } +} +impl From for Error { + fn from(kind: ErrorKind) -> Self { + Self { kind } + } +} + +impl From for Error { + fn from(err: StdIoError) -> Self { + ErrorKind::IoError(err).into() + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match &self.kind { + ErrorKind::UpperBoundSizeHintRequired => write!( + f, + "size hint upper bound (SizeHint::upper) is required but was None" + ), + ErrorKind::OffsetGreaterThanFileSize => write!( + f, + "offset must be less than or equal to file size but was greater than" + ), + ErrorKind::IoError(_) => write!(f, "I/O error"), + ErrorKind::TaskFailed(_) => write!(f, "task failed"), + } + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match &self.kind { + ErrorKind::UpperBoundSizeHintRequired => None, + ErrorKind::OffsetGreaterThanFileSize => None, + ErrorKind::IoError(err) => Some(err as _), + ErrorKind::TaskFailed(err) => Some(err as _), + } + } +} +impl From for StdIoError { + fn from(err: Error) -> Self { + StdIoError::new(StdIoErrorKind::Other, err) + } +} + +impl From for Error { + fn from(value: JoinError) -> Self { + ErrorKind::TaskFailed(value).into() + } +} diff --git a/aws-s3-transfer-manager/src/io/mod.rs b/aws-s3-transfer-manager/src/io/mod.rs new file mode 100644 index 0000000..ebe9a60 --- /dev/null +++ b/aws-s3-transfer-manager/src/io/mod.rs @@ -0,0 +1,17 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +pub(crate) mod part_reader; +mod path_body; +mod stream; + +/// Error types related to I/O abstractions +pub mod error; +mod size_hint; + +// re-exports +pub use self::path_body::PathBodyBuilder; +pub use self::size_hint::SizeHint; +pub use self::stream::InputStream; diff --git a/aws-s3-transfer-manager/src/io/part_reader.rs b/aws-s3-transfer-manager/src/io/part_reader.rs new file mode 100644 index 0000000..338ea9e --- /dev/null +++ b/aws-s3-transfer-manager/src/io/part_reader.rs @@ -0,0 +1,337 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ +use std::cmp; +use std::ops::DerefMut; +use std::sync::Mutex; + +use bytes::{Buf, Bytes, BytesMut}; + +use crate::io::error::Error; +use crate::io::path_body::PathBody; +use crate::io::stream::RawInputStream; +use crate::io::InputStream; +use crate::MEBIBYTE; + +/// Builder for creating a `ReadPart` implementation. +#[derive(Debug)] +pub(crate) struct Builder { + stream: Option, + part_size: usize, +} + +impl Builder { + pub(crate) fn new() -> Self { + Self { + stream: None, + part_size: 5 * MEBIBYTE as usize, + } + } + + /// Set the input stream to read from. + pub(crate) fn stream(mut self, stream: InputStream) -> Self { + self.stream = Some(stream.inner); + self + } + + /// Set the target part size that should be used when reading data. 
+ /// + /// All parts except for possibly the last one should be of this size. + pub(crate) fn part_size(mut self, part_size: usize) -> Self { + self.part_size = part_size; + self + } + + pub(crate) fn build(self) -> impl ReadPart { + let stream = self.stream.expect("input stream set"); + match stream { + RawInputStream::Buf(buf) => { + PartReader::Bytes(BytesPartReader::new(buf, self.part_size)) + } + RawInputStream::Fs(path_body) => { + PartReader::Fs(PathBodyPartReader::new(path_body, self.part_size)) + } + } + } +} + +#[derive(Debug)] +enum PartReader { + Bytes(BytesPartReader), + Fs(PathBodyPartReader), +} + +impl ReadPart for PartReader { + async fn next_part(&self) -> Result, Error> { + match self { + PartReader::Bytes(bytes) => bytes.next_part().await, + PartReader::Fs(path_body) => path_body.next_part().await, + } + } +} + +/// Data for a single part +pub(crate) struct PartData { + // 1-indexed + pub(crate) part_number: u64, + pub(crate) data: Bytes, +} + +/// The `ReadPart` trait allows for reading data from an `InputStream` and packaging the raw +/// data into `PartData` which carries additional metadata needed for uploading a part. +pub(crate) trait ReadPart { + /// Request the next "part" of data. + /// + /// When there is no more data readers should return `Ok(None)`. + /// NOTE: Implementations are allowed to return data in any order and consumers are + /// expected to order data by the part number. + fn next_part( + &self, + ) -> impl std::future::Future, Error>> + Send; +} + +#[derive(Debug)] +struct PartReaderState { + // current start offset + offset: u64, + // current part number + part_number: u64, + // total number of bytes remaining to be read + remaining: u64, +} + +impl PartReaderState { + /// Create a new `PartReaderState` + fn new(content_length: u64) -> Self { + Self { + offset: 0, + part_number: 1, + remaining: content_length, + } + } + + /// Set the initial offset to start reading from + fn with_offset(self, offset: u64) -> Self { + Self { offset, ..self } + } +} + +/// [ReadPart] implementation for in-memory input streams. 
+#[derive(Debug)]
+struct BytesPartReader {
+    buf: Bytes,
+    part_size: usize,
+    state: Mutex<PartReaderState>,
+}
+
+impl BytesPartReader {
+    fn new(buf: Bytes, part_size: usize) -> Self {
+        let content_length = buf.remaining() as u64;
+        Self {
+            buf,
+            part_size,
+            state: Mutex::new(PartReaderState::new(content_length)), // std Mutex
+        }
+    }
+}
+
+impl ReadPart for BytesPartReader {
+    async fn next_part(&self) -> Result<Option<PartData>, Error> {
+        let mut state = self.state.lock().expect("lock valid");
+        if state.remaining == 0 {
+            return Ok(None);
+        }
+
+        let start = state.offset as usize;
+        let end = cmp::min(start + self.part_size, self.buf.len());
+        let data = self.buf.slice(start..end);
+        let part_number = state.part_number;
+        state.part_number += 1;
+        state.offset += data.len() as u64;
+        state.remaining -= data.len() as u64;
+        let part = PartData { data, part_number };
+        Ok(Some(part))
+    }
+}
+
+/// [ReadPart] implementation for path based input streams
+#[derive(Debug)]
+struct PathBodyPartReader {
+    body: PathBody,
+    part_size: usize,
+    state: Mutex<PartReaderState>, // std Mutex
+}
+
+impl PathBodyPartReader {
+    fn new(body: PathBody, part_size: usize) -> Self {
+        let offset = body.offset;
+        let content_length = body.length;
+        Self {
+            body,
+            part_size,
+            state: Mutex::new(PartReaderState::new(content_length).with_offset(offset)), // std Mutex
+        }
+    }
+}
+
+impl ReadPart for PathBodyPartReader {
+    async fn next_part(&self) -> Result<Option<PartData>, Error> {
+        let (offset, part_number, part_size) = {
+            let mut state = self.state.lock().expect("lock valid");
+            if state.remaining == 0 {
+                return Ok(None);
+            }
+            let offset = state.offset;
+            let part_number = state.part_number;
+
+            let part_size = cmp::min(self.part_size as u64, state.remaining);
+            state.offset += part_size;
+            state.part_number += 1;
+            state.remaining -= part_size;
+
+            (offset, part_number, part_size)
+        };
+        let path = self.body.path.clone();
+        let handle = tokio::task::spawn_blocking(move || {
+            // TODO(aws-sdk-rust#1159) - replace allocation with memory pool
+            let mut dst = BytesMut::with_capacity(part_size as usize);
+            // we need to set the length so that the raw &[u8] slice has the correct
+            // size, we are guaranteed to read exactly part_size data from file on success
+            // FIXME(aws-sdk-rust#1159) - can we get rid of this use of unsafe?
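+            // The uninitialized contents are never observed: `read_file_chunk_sync` either
+            // fills the entire slice (`read_exact_at`/`read_exact` semantics) or returns an
+            // error, in which case `dst` is dropped before `freeze()` is reached.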
+            unsafe { dst.set_len(dst.capacity()) }
+            file_util::read_file_chunk_sync(dst.deref_mut(), path, offset)?;
+            let data = dst.freeze();
+            Ok::<PartData, Error>(PartData { data, part_number })
+        });
+
+        handle.await?.map(Some)
+    }
+}
+
+mod file_util {
+    #[cfg(unix)]
+    pub(super) use unix::read_file_chunk_sync;
+    #[cfg(windows)]
+    pub(super) use windows::read_file_chunk_sync;
+
+    #[cfg(unix)]
+    mod unix {
+        use std::fs::File;
+        use std::io;
+        use std::os::unix::fs::FileExt;
+        use std::path::Path;
+
+        pub(crate) fn read_file_chunk_sync(
+            dst: &mut [u8],
+            path: impl AsRef<Path>,
+            offset: u64,
+        ) -> Result<(), io::Error> {
+            let file = File::open(path)?;
+            file.read_exact_at(dst, offset)
+        }
+    }
+
+    #[cfg(windows)]
+    mod windows {
+        use std::fs::File;
+        use std::io;
+        use std::io::{Read, Seek, SeekFrom};
+        use std::path::Path;
+
+        pub(crate) fn read_file_chunk_sync(
+            dst: &mut [u8],
+            path: impl AsRef<Path>,
+            offset: u64,
+        ) -> Result<(), io::Error> {
+            let mut file = File::open(path)?;
+            file.seek(SeekFrom::Start(offset))?;
+            file.read_exact(dst)
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::io::Write;
+
+    use bytes::{Buf, Bytes};
+    use tempfile::NamedTempFile;
+
+    use crate::io::part_reader::{PartData, Builder, ReadPart};
+    use crate::io::InputStream;
+
+    async fn collect_parts(reader: impl ReadPart) -> Vec<PartData> {
+        let mut parts = Vec::new();
+        let mut expected_part_number = 1;
+        while let Some(part) = reader.next_part().await.unwrap() {
+            assert_eq!(expected_part_number, part.part_number);
+            expected_part_number += 1;
+            parts.push(part);
+        }
+        parts
+    }
+
+    #[tokio::test]
+    async fn test_bytes_part_reader() {
+        let data = Bytes::from("a lep is a ball, a tay is a hammer, a flix is a comb");
+        let stream = InputStream::from(data.clone());
+        let expected = data.chunks(5).collect::<Vec<_>>();
+        let reader = Builder::new().part_size(5).stream(stream).build();
+        let parts = collect_parts(reader).await;
+        let actual = parts.iter().map(|p| p.data.chunk()).collect::<Vec<_>>();
+
+        assert_eq!(expected, actual);
+    }
+
+    async fn path_reader_test(limit: Option<usize>, offset: Option<usize>) {
+        let part_size = 5;
+        let mut tmp = NamedTempFile::new().unwrap();
+        let mut data = Bytes::from("a lep is a ball, a tay is a hammer, a flix is a comb");
+        tmp.write_all(data.chunk()).unwrap();
+
+        let mut builder = InputStream::read_from().path(tmp.path());
+        if let Some(limit) = limit {
+            data.truncate(limit);
+            builder = builder.length((limit - offset.unwrap_or_default()) as u64);
+        }
+
+        if let Some(offset) = offset {
+            data.advance(offset);
+            builder = builder.offset(offset as u64);
+        }
+
+        let expected = data.chunks(part_size).collect::<Vec<_>>();
+
+        let stream = builder.build().unwrap();
+        let reader = Builder::new()
+            .part_size(part_size)
+            .stream(stream)
+            .build();
+
+        let parts = collect_parts(reader).await;
+        let actual = parts.iter().map(|p| p.data.chunk()).collect::<Vec<_>>();
+
+        assert_eq!(expected, actual);
+    }
+
+    #[tokio::test]
+    async fn test_path_part_reader() {
+        path_reader_test(None, None).await;
+    }
+
+    #[tokio::test]
+    async fn test_path_part_reader_with_offset() {
+        path_reader_test(None, Some(8)).await;
+    }
+
+    #[tokio::test]
+    async fn test_path_part_reader_with_explicit_length() {
+        path_reader_test(Some(12), None).await;
+    }
+
+    #[tokio::test]
+    async fn test_path_part_reader_with_length_and_offset() {
+        path_reader_test(Some(23), Some(4)).await;
+    }
+}
diff --git a/aws-s3-transfer-manager/src/io/path_body.rs b/aws-s3-transfer-manager/src/io/path_body.rs
new file mode 100644
index 0000000..0d64d0f
--- /dev/null
+++ 
b/aws-s3-transfer-manager/src/io/path_body.rs @@ -0,0 +1,209 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +use crate::io::error::{Error, ErrorKind}; +use crate::io::stream::{InputStream, RawInputStream}; +use std::fs; +use std::path::PathBuf; + +/// Input stream designed to wrap file based input. +#[derive(Debug)] +pub(super) struct PathBody { + // The path to the file + pub(super) path: PathBuf, + // The total number of bytes to read + pub(super) length: u64, + // The byte-offset to start reading from + pub(super) offset: u64, +} + +/// Builder for creating [`InputStream`](InputStream) from a file/path. +/// +/// ```no_run +/// # { +/// use aws_s3_transfer_manager::io::InputStream; +/// use std::path::Path; +/// +/// async fn input_stream_from_file() -> InputStream { +/// let stream = InputStream::read_from() +/// .path("docs/some-large-file.csv") +/// // Specify the length of the file used (skips an additional call to retrieve the size) +/// .build() +/// .expect("valid path"); +/// stream +/// } +/// # } +/// ``` +#[derive(Debug, Default)] +pub struct PathBodyBuilder { + path: Option, + length: Option, + offset: Option, +} + +impl PathBodyBuilder { + /// Create a new [`PathBodyBuilder`]. + /// + /// You must call [`path`](PathBodyBuilder::path) to specify what to read from. + pub fn new() -> Self { + Self::default() + } + + /// Sets the path to read from. + pub fn path(mut self, path: impl AsRef) -> Self { + self.path = Some(path.as_ref().to_path_buf()); + self + } + + /// Specify the offset to start reading from (in bytes) + /// + /// When used in conjunction with [`length`](PathBodyBuilder::length), allows for reading a single "chunk" of a file. + pub fn offset(mut self, offset: u64) -> Self { + self.offset = Some(offset); + self + } + + /// Specify the length to read (in bytes). + /// + /// By pre-specifying the length, this API skips an additional call to retrieve the size from file-system metadata. + /// + /// When used in conjunction with [`offset`](PathBodyBuilder::offset), allows for reading a single "chunk" of a file. + /// + ///
+ /// Setting the length manually will trigger no validation related to any offset provided or the actual size of + /// the file. This is an advanced setting mainly used to avoid an additional syscall if you know the + /// size of the file already. + ///
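+    ///
+    /// For illustration, a minimal sketch of reading a single 5 MiB chunk that starts
+    /// 10 MiB into a (hypothetical) file:
+    ///
+    /// ```no_run
+    /// use aws_s3_transfer_manager::io::InputStream;
+    ///
+    /// fn chunk_stream() -> InputStream {
+    ///     InputStream::read_from()
+    ///         .path("/tmp/large-input.bin")
+    ///         .offset(10 * 1024 * 1024)
+    ///         .length(5 * 1024 * 1024)
+    ///         .build()
+    ///         .expect("valid path")
+    /// }
+    /// ```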
+ pub fn length(mut self, length: u64) -> Self { + self.length = Some(length); + self + } + + /// Returns a [`InputStream`] from this builder. + pub fn build(self) -> Result { + let path = self.path.expect("path set"); + let offset = self.offset.unwrap_or_default(); + + let length = match self.length { + None => { + // TODO(aws-sdk-rust#1159, design) - evaluate if we want build() to be async and to use tokio for stat() call (bytestream FsBuilder::build() is async) + let metadata = fs::metadata(path.clone())?; + let file_size = metadata.len(); + + if offset >= file_size { + return Err(ErrorKind::OffsetGreaterThanFileSize.into()); + } + + file_size - offset + } + Some(explicit) => explicit, + }; + + let body = PathBody { + path, + length, + offset, + }; + + let stream = InputStream { + inner: RawInputStream::Fs(body), + }; + + Ok(stream) + } +} + +#[cfg(test)] +mod test { + use std::io::Write; + use tempfile::NamedTempFile; + + use crate::io::{path_body::PathBodyBuilder, InputStream}; + + use super::PathBody; + + fn path_body(stream: &InputStream) -> &PathBody { + match &stream.inner { + crate::io::stream::RawInputStream::Buf(_) => panic!("unexpected inner body"), + crate::io::stream::RawInputStream::Fs(path_body) => path_body, + } + } + + #[test] + fn test_from_path() { + let mut tmp = NamedTempFile::new().unwrap(); + let content = "hello path body"; + tmp.write_all(content.as_bytes()).unwrap(); + + let stream = PathBodyBuilder::new().path(tmp.path()).build().unwrap(); + let body = path_body(&stream); + assert_eq!(0, body.offset); + assert_eq!(content.as_bytes().len() as u64, body.length); + } + + #[test] + fn test_explicit_content_length() { + let mut tmp = NamedTempFile::new().unwrap(); + + let stream = PathBodyBuilder::new() + .path(tmp.path()) + .length(64) + .build() + .unwrap(); + + let body = path_body(&stream); + assert_eq!(0, body.offset); + // we don't validate this + assert_eq!(64, body.length); + } + + #[test] + fn test_length_with_offset() { + let mut tmp = NamedTempFile::new().unwrap(); + let content = "hello path body"; + tmp.write_all(content.as_bytes()).unwrap(); + let offset = 5; + + let stream = PathBodyBuilder::new() + .path(tmp.path()) + .offset(offset) + .build() + .unwrap(); + + let body = path_body(&stream); + assert_eq!(offset, body.offset); + assert_eq!(content.len() as u64 - offset, body.length); + } + + #[test] + fn test_explicit_content_length_and_offset() { + let mut tmp = NamedTempFile::new().unwrap(); + + let stream = PathBodyBuilder::new() + .path(tmp.path()) + .length(64) + .offset(12) + .build() + .unwrap(); + + let body = path_body(&stream); + assert_eq!(12, body.offset); + assert_eq!(64, body.length); + } + + #[should_panic] + #[test] + fn test_invalid_offset() { + let mut tmp = NamedTempFile::new().unwrap(); + let content = "hello path body"; + tmp.write_all(content.as_bytes()).unwrap(); + + let stream = PathBodyBuilder::new() + .path(tmp.path()) + .offset(22) + .build() + .unwrap(); + } +} diff --git a/aws-s3-transfer-manager/src/io/size_hint.rs b/aws-s3-transfer-manager/src/io/size_hint.rs new file mode 100644 index 0000000..4309617 --- /dev/null +++ b/aws-s3-transfer-manager/src/io/size_hint.rs @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +/// A body size hint +#[derive(Debug, Clone, Default)] +pub struct SizeHint { + lower: u64, + upper: Option, +} + +impl SizeHint { + /// Set an exact size hint with upper and lower set to `size` bytes. 
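+    ///
+    /// A minimal sketch of the resulting bounds:
+    ///
+    /// ```
+    /// use aws_s3_transfer_manager::io::SizeHint;
+    ///
+    /// let hint = SizeHint::exact(1024);
+    /// assert_eq!(1024, hint.lower());
+    /// assert_eq!(Some(1024), hint.upper());
+    /// ```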
+ pub fn exact(size: u64) -> Self { + Self { + lower: size, + upper: Some(size), + } + } + + /// Set the lower bound on the body size + pub fn with_lower(self, lower: u64) -> Self { + Self { lower, ..self } + } + + /// Set the upper bound on the body size + pub fn with_upper(self, upper: Option) -> Self { + Self { upper, ..self } + } + + /// Get the lower bound of the body size + pub fn lower(&self) -> u64 { + self.lower + } + + /// Get the upper bound of the body size if known. + pub fn upper(&self) -> Option { + self.upper + } +} diff --git a/aws-s3-transfer-manager/src/io/stream.rs b/aws-s3-transfer-manager/src/io/stream.rs new file mode 100644 index 0000000..c346d82 --- /dev/null +++ b/aws-s3-transfer-manager/src/io/stream.rs @@ -0,0 +1,129 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +use std::default::Default; +use std::path::Path; + +use bytes::{Buf, Bytes}; + +use crate::io::error::Error; +use crate::io::path_body::PathBody; +use crate::io::path_body::PathBodyBuilder; +use crate::io::size_hint::SizeHint; + +/// Source of binary data. +/// +/// `InputStream` wraps a stream of data for ease of use. +#[derive(Debug)] +pub struct InputStream { + pub(super) inner: RawInputStream, +} + +impl InputStream { + /// Create a new `InputStream` from a static byte slice + pub fn from_static(bytes: &'static [u8]) -> Self { + let inner = RawInputStream::Buf(bytes.into()); + Self { inner } + } + + /// Return the bounds on the remaining length of the `InputStream` + pub fn size_hint(&self) -> SizeHint { + self.inner.size_hint() + } + + /// Returns a [`PathBodyBuilder`], allowing you to build a `InputStream` with + /// full control over how the file is read (eg. specifying the length of + /// the file or the starting offset to read from). + /// + /// ```no_run + /// # { + /// use aws_s3_transfer_manager::io::InputStream; + /// + /// async fn input_stream_from_file() -> InputStream { + /// let stream = InputStream::read_from() + /// .path("docs/some-large-file.csv") + /// // Specify the length of the file used (skips an additional call to retrieve the size) + /// .length(123_456) + /// .build() + /// .expect("valid path"); + /// stream + /// } + /// # } + /// ``` + pub fn read_from() -> PathBodyBuilder { + PathBodyBuilder::new() + } + + /// Create a new `InputStream` that reads data from a given `path`. + /// + /// ## Warning + /// The contents of the file MUST not change. The length & checksum of the file + /// will be cached. If the contents of the file change, the operation will almost certainly fail. + /// + /// Furthermore, a partial write MAY seek in the file and resume from the previous location. 
+    ///
+    /// # Examples
+    /// ```no_run
+    /// use aws_s3_transfer_manager::io::InputStream;
+    /// use std::path::Path;
+    /// async fn make_stream() -> InputStream {
+    ///     InputStream::from_path("docs/rows.csv").expect("file should be readable")
+    /// }
+    /// ```
+    pub fn from_path(path: impl AsRef<Path>) -> Result<Self, Error> {
+        Self::read_from().path(path).build()
+    }
+}
+
+#[derive(Debug)]
+pub(super) enum RawInputStream {
+    /// In-memory buffer to read from
+    Buf(Bytes),
+    /// File based input
+    Fs(PathBody),
+}
+
+impl RawInputStream {
+    pub(super) fn size_hint(&self) -> SizeHint {
+        match self {
+            RawInputStream::Buf(bytes) => SizeHint::exact(bytes.remaining() as u64),
+            RawInputStream::Fs(path_body) => SizeHint::exact(path_body.length),
+        }
+    }
+}
+
+impl Default for InputStream {
+    fn default() -> Self {
+        Self {
+            inner: RawInputStream::Buf(Bytes::default()),
+        }
+    }
+}
+
+impl From<Bytes> for InputStream {
+    fn from(value: Bytes) -> Self {
+        Self {
+            inner: RawInputStream::Buf(value),
+        }
+    }
+}
+
+impl From<Vec<u8>> for InputStream {
+    fn from(value: Vec<u8>) -> Self {
+        Self::from(Bytes::from(value))
+    }
+}
+
+impl From<&'static [u8]> for InputStream {
+    fn from(slice: &'static [u8]) -> InputStream {
+        Self::from(Bytes::from_static(slice))
+    }
+}
+
+impl From<&'static str> for InputStream {
+    fn from(slice: &'static str) -> InputStream {
+        Self::from(Bytes::from_static(slice.as_bytes()))
+    }
+}
diff --git a/aws-s3-transfer-manager/src/lib.rs b/aws-s3-transfer-manager/src/lib.rs
new file mode 100644
index 0000000..9ba6988
--- /dev/null
+++ b/aws-s3-transfer-manager/src/lib.rs
@@ -0,0 +1,35 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* Automatically managed default lints */
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+/* End of automatically managed default lints */
+
+//! AWS S3 Transfer Manager
+//!
+//! # Crate Features
+//!
+//! - `test-util`: Enables utilities for unit tests. DO NOT ENABLE IN PRODUCTION.
+
+#![warn(
+    missing_debug_implementations,
+    missing_docs,
+    rustdoc::missing_crate_level_docs,
+    unreachable_pub,
+    rust_2018_idioms
+)]
+
+pub(crate) const MEBIBYTE: u64 = 1024 * 1024;
+/// Abstractions for downloading objects from S3
+pub mod download;
+
+/// Error types emitted by `aws-s3-transfer-manager`
+pub mod error;
+
+/// Types and helpers for I/O
+pub mod io;
+
+/// Abstractions for uploading objects to Amazon S3
+pub mod upload;
diff --git a/aws-s3-transfer-manager/src/upload.rs b/aws-s3-transfer-manager/src/upload.rs
new file mode 100644
index 0000000..8290e5d
--- /dev/null
+++ b/aws-s3-transfer-manager/src/upload.rs
@@ -0,0 +1,14 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+pub use crate::upload::request::UploadRequest;
+pub use crate::upload::response::UploadResponse;
+
+
+/// Request types for uploads to Amazon S3
+pub mod request;
+
+/// Response types for uploads to Amazon S3
+pub mod response;
diff --git a/aws-s3-transfer-manager/src/upload/request.rs b/aws-s3-transfer-manager/src/upload/request.rs
new file mode 100644
index 0000000..ec57f14
--- /dev/null
+++ b/aws-s3-transfer-manager/src/upload/request.rs
@@ -0,0 +1,1503 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+use crate::io::InputStream;
+use std::fmt::Debug;
+use std::mem;
+
+/// Request type for uploading a single object
+#[non_exhaustive]
+pub struct UploadRequest {
+    ///

The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide.

+ ///

When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in the Amazon S3 User Guide.

+ ///

If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub acl: Option, + ///

Object data.

+ pub body: crate::io::InputStream, + ///

The bucket name to which the PUT action was initiated.

+ ///

Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.

+ ///

Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

+ ///

Access points and Object Lambda access points are not supported by directory buckets.

+ ///
+ ///

S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

+ pub bucket: Option, + ///

Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.

+ pub cache_control: Option, + ///

Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4.

+ pub content_disposition: Option, + ///

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding.

+ pub content_encoding: Option, + ///

The language the content is in.

+ pub content_language: Option, + ///

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.

+ pub content_length: Option, + ///

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

+ ///

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

+ ///
+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub content_md5: Option, + ///

A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type.

+ pub content_type: Option, + ///

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

+ ///

For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:

+ ///
    + ///
  • + ///

    CRC32

  • + ///
  • + ///

    CRC32C

  • + ///
  • + ///

    SHA1

  • + ///
  • + ///

    SHA256

  • + ///
+ ///

For more information, see Checking object integrity in the Amazon S3 User Guide.

+ ///

If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm .

+ ///

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

+ ///
+ pub checksum_algorithm: Option, + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub checksum_crc32: Option, + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub checksum_crc32_c: Option, + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub checksum_sha1: Option, + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub checksum_sha256: Option, + ///

The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3.

+ pub expires: Option<::aws_smithy_types::DateTime>, + ///

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub grant_full_control: Option, + ///

Allows grantee to read the object data and its metadata.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub grant_read: Option, + ///

Allows grantee to read the object ACL.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub grant_read_acp: Option, + ///

Allows grantee to write the ACL for the applicable object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub grant_write_acp: Option, + ///

Object key for which the PUT action was initiated.

+ pub key: Option, + ///

A map of metadata to store with the object in S3.

+ pub metadata: Option<::std::collections::HashMap>, + ///

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

+ ///

General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

+ ///

Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

+ pub server_side_encryption: Option, + ///

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

+ ///
    + ///
  • + ///

    For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.

  • + ///
  • + ///

    Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.

  • + ///
+ ///
+ pub storage_class: Option, + ///

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3 User Guide.

+ ///

In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

+ ///

x-amz-website-redirect-location: /anotherPage.html

+ ///

In the following example, the request header sets the object redirect to another website:

+ ///

x-amz-website-redirect-location: http://www.example.com/

+ ///

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub website_redirect_location: Option, + ///

Specifies the algorithm to use when encrypting the object (for example, AES256).

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub sse_customer_algorithm: Option, + ///

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub sse_customer_key: Option, + ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub sse_customer_key_md5: Option, + ///

If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub sse_kms_key_id: Option, + ///

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub sse_kms_encryption_context: Option, + ///

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

+ ///

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub bucket_key_enabled: Option, + ///

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub request_payer: Option, + ///

The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1")

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub tagging: Option, + ///

The Object Lock mode that you want to apply to this object.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub object_lock_mode: Option, + ///

The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub object_lock_retain_until_date: Option<::aws_smithy_types::DateTime>, + ///

Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub object_lock_legal_hold_status: Option, + ///

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

+ pub expected_bucket_owner: Option, +} + +impl UploadRequest { + /// Create a new builder for `UploadRequest` + pub fn builder() -> UploadRequestBuilder { + UploadRequestBuilder::default() + } + + /// Split the body from the request by taking it and replacing it with the default. + pub(crate) fn take_body(&mut self) -> InputStream { + mem::take(&mut self.body) + } + + ///

The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide.

+ ///

When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in the Amazon S3 User Guide.

+ ///

If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn acl(&self) -> Option<&aws_sdk_s3::types::ObjectCannedAcl> { + self.acl.as_ref() + } + ///

Object data.

+ pub fn body(&self) -> &crate::io::InputStream { + &self.body + } + ///

The bucket name to which the PUT action was initiated.

+ ///

Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.

+ ///

Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

+ ///

Access points and Object Lambda access points are not supported by directory buckets.

+ ///
+ ///

S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.

+ pub fn bucket(&self) -> Option<&str> { + self.bucket.as_deref() + } + ///

Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.

+ pub fn cache_control(&self) -> Option<&str> { + self.cache_control.as_deref() + } + ///

Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4.

+ pub fn content_disposition(&self) -> Option<&str> { + self.content_disposition.as_deref() + } + ///

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding.

+ pub fn content_encoding(&self) -> Option<&str> { + self.content_encoding.as_deref() + } + ///

The language the content is in.

+ pub fn content_language(&self) -> Option<&str> { + self.content_language.as_deref() + } + ///

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.

+ pub fn content_length(&self) -> Option { + self.content_length + } + ///

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

+ ///

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

+ ///
+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn content_md5(&self) -> Option<&str> { + self.content_md5.as_deref() + } + ///

A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type.

+ pub fn content_type(&self) -> Option<&str> { + self.content_type.as_deref() + } + ///

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

+ ///

For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:

+ ///
    + ///
  • + ///

    CRC32

  • + ///
  • + ///

    CRC32C

  • + ///
  • + ///

    SHA1

  • + ///
  • + ///

    SHA256

  • + ///
+ ///

For more information, see Checking object integrity in the Amazon S3 User Guide.

+ ///

If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm .

+ ///

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

+ ///
+ pub fn checksum_algorithm(&self) -> Option<&aws_sdk_s3::types::ChecksumAlgorithm> { + self.checksum_algorithm.as_ref() + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_crc32(&self) -> Option<&str> { + self.checksum_crc32.as_deref() + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_crc32_c(&self) -> Option<&str> { + self.checksum_crc32_c.as_deref() + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_sha1(&self) -> Option<&str> { + self.checksum_sha1.as_deref() + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_sha256(&self) -> Option<&str> { + self.checksum_sha256.as_deref() + } + ///

The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3.

+ pub fn expires(&self) -> Option<&::aws_smithy_types::DateTime> { + self.expires.as_ref() + } + ///

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_full_control(&self) -> Option<&str> { + self.grant_full_control.as_deref() + } + ///

Allows grantee to read the object data and its metadata.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_read(&self) -> Option<&str> { + self.grant_read.as_deref() + } + ///

Allows grantee to read the object ACL.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_read_acp(&self) -> Option<&str> { + self.grant_read_acp.as_deref() + } + ///

Allows grantee to write the ACL for the applicable object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_write_acp(&self) -> Option<&str> { + self.grant_write_acp.as_deref() + } + ///

Object key for which the PUT action was initiated.

+ pub fn key(&self) -> Option<&str> { + self.key.as_deref() + } + ///

A map of metadata to store with the object in S3.

+ pub fn metadata(&self) -> Option<&::std::collections::HashMap> { + self.metadata.as_ref() + } + ///

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

+ ///

General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

+ ///

Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

+ pub fn server_side_encryption(&self) -> Option<&aws_sdk_s3::types::ServerSideEncryption> { + self.server_side_encryption.as_ref() + } + ///

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

+ ///
    + ///
  • + ///

    For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.

  • + ///
  • + ///

    Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.

  • + ///
+ ///
+ pub fn storage_class(&self) -> Option<&aws_sdk_s3::types::StorageClass> { + self.storage_class.as_ref() + } + ///

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3 User Guide.

+ ///

In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

+ ///

x-amz-website-redirect-location: /anotherPage.html

+ ///

In the following example, the request header sets the object redirect to another website:

+ ///

x-amz-website-redirect-location: http://www.example.com/

+ ///

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn website_redirect_location(&self) -> Option<&str> { + self.website_redirect_location.as_deref() + } + ///

Specifies the algorithm to use when encrypting the object (for example, AES256).

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_customer_algorithm(&self) -> Option<&str> { + self.sse_customer_algorithm.as_deref() + } + ///

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_customer_key(&self) -> Option<&str> { + self.sse_customer_key.as_deref() + } + ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_customer_key_md5(&self) -> Option<&str> { + self.sse_customer_key_md5.as_deref() + } + ///

If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_kms_key_id(&self) -> Option<&str> { + self.sse_kms_key_id.as_deref() + } + ///

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_kms_encryption_context(&self) -> Option<&str> { + self.sse_kms_encryption_context.as_deref() + } + ///

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

+ ///

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn bucket_key_enabled(&self) -> Option { + self.bucket_key_enabled + } + ///

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn request_payer(&self) -> Option<&aws_sdk_s3::types::RequestPayer> { + self.request_payer.as_ref() + } + ///

The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1")

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn tagging(&self) -> Option<&str> { + self.tagging.as_deref() + } + ///

The Object Lock mode that you want to apply to this object.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn object_lock_mode(&self) -> Option<&aws_sdk_s3::types::ObjectLockMode> { + self.object_lock_mode.as_ref() + } + ///

The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn object_lock_retain_until_date(&self) -> Option<&::aws_smithy_types::DateTime> { + self.object_lock_retain_until_date.as_ref() + } + ///

Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn object_lock_legal_hold_status( + &self, + ) -> Option<&aws_sdk_s3::types::ObjectLockLegalHoldStatus> { + self.object_lock_legal_hold_status.as_ref() + } + ///

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

+ pub fn expected_bucket_owner(&self) -> Option<&str> { + self.expected_bucket_owner.as_deref() + } +} + +impl Debug for UploadRequest { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + let mut formatter = f.debug_struct("UploadRequest"); + formatter.field("acl", &self.acl); + formatter.field("body", &self.body); + formatter.field("bucket", &self.bucket); + formatter.field("cache_control", &self.cache_control); + formatter.field("content_disposition", &self.content_disposition); + formatter.field("content_encoding", &self.content_encoding); + formatter.field("content_language", &self.content_language); + formatter.field("content_length", &self.content_length); + formatter.field("content_md5", &self.content_md5); + formatter.field("content_type", &self.content_type); + formatter.field("checksum_algorithm", &self.checksum_algorithm); + formatter.field("checksum_crc32", &self.checksum_crc32); + formatter.field("checksum_crc32_c", &self.checksum_crc32_c); + formatter.field("checksum_sha1", &self.checksum_sha1); + formatter.field("checksum_sha256", &self.checksum_sha256); + formatter.field("expires", &self.expires); + formatter.field("grant_full_control", &self.grant_full_control); + formatter.field("grant_read", &self.grant_read); + formatter.field("grant_read_acp", &self.grant_read_acp); + formatter.field("grant_write_acp", &self.grant_write_acp); + formatter.field("key", &self.key); + formatter.field("metadata", &self.metadata); + formatter.field("server_side_encryption", &self.server_side_encryption); + formatter.field("storage_class", &self.storage_class); + formatter.field( + "website_redirect_location", + &self.website_redirect_location(), + ); + formatter.field("sse_customer_algorithm", &self.sse_customer_algorithm); + formatter.field("sse_customer_key", &"*** Sensitive Data Redacted ***"); + formatter.field("sse_customer_key_md5", &self.sse_customer_key_md5); + formatter.field("sse_kms_key_id", &"*** Sensitive Data Redacted ***"); + formatter.field( + "sse_kms_encryption_context", + &"*** Sensitive Data Redacted ***", + ); + formatter.field("bucket_key_enabled", &self.bucket_key_enabled); + formatter.field("request_payer", &self.request_payer); + formatter.field("tagging", &self.tagging); + formatter.field("object_lock_mode", &self.object_lock_mode); + formatter.field( + "object_lock_retain_until_date", + &self.object_lock_retain_until_date(), + ); + formatter.field( + "object_lock_legal_hold_status", + &self.object_lock_legal_hold_status(), + ); + formatter.field("expected_bucket_owner", &self.expected_bucket_owner); + formatter.finish() + } +} + +/// A builder for [`UploadRequest`]. 
+#[non_exhaustive] +#[derive(Default)] +pub struct UploadRequestBuilder { + pub(crate) acl: Option, + pub(crate) body: Option, + pub(crate) bucket: Option, + pub(crate) cache_control: Option, + pub(crate) content_disposition: Option, + pub(crate) content_encoding: Option, + pub(crate) content_language: Option, + pub(crate) content_length: Option, + pub(crate) content_md5: Option, + pub(crate) content_type: Option, + pub(crate) checksum_algorithm: Option, + pub(crate) checksum_crc32: Option, + pub(crate) checksum_crc32_c: Option, + pub(crate) checksum_sha1: Option, + pub(crate) checksum_sha256: Option, + pub(crate) expires: Option<::aws_smithy_types::DateTime>, + pub(crate) grant_full_control: Option, + pub(crate) grant_read: Option, + pub(crate) grant_read_acp: Option, + pub(crate) grant_write_acp: Option, + pub(crate) key: Option, + pub(crate) metadata: Option<::std::collections::HashMap>, + pub(crate) server_side_encryption: Option, + pub(crate) storage_class: Option, + pub(crate) website_redirect_location: Option, + pub(crate) sse_customer_algorithm: Option, + pub(crate) sse_customer_key: Option, + pub(crate) sse_customer_key_md5: Option, + pub(crate) sse_kms_key_id: Option, + pub(crate) sse_kms_encryption_context: Option, + pub(crate) bucket_key_enabled: Option, + pub(crate) request_payer: Option, + pub(crate) tagging: Option, + pub(crate) object_lock_mode: Option, + pub(crate) object_lock_retain_until_date: Option<::aws_smithy_types::DateTime>, + pub(crate) object_lock_legal_hold_status: Option, + pub(crate) expected_bucket_owner: Option, +} + +impl UploadRequestBuilder { + ///

+    /// The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide.
+    ///
+    /// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in the Amazon S3 User Guide.
+    ///
+    /// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
+    ///
+    /// - This functionality is not supported for directory buckets.
+    /// - This functionality is not supported for Amazon S3 on Outposts.
+    pub fn acl(mut self, input: aws_sdk_s3::types::ObjectCannedAcl) -> Self {
+        self.acl = Some(input);
+        self
+    }
+    /// The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide.
+    ///
+    /// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in the Amazon S3 User Guide.
+    ///
+    /// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
+    ///
+    /// - This functionality is not supported for directory buckets.
+    /// - This functionality is not supported for Amazon S3 on Outposts.
+    pub fn set_acl(mut self, input: Option<aws_sdk_s3::types::ObjectCannedAcl>) -> Self {
+        self.acl = input;
+        self
+    }
+    /// The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide.
+    ///
+    /// When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in the Amazon S3 User Guide.
+    ///
+    /// If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
+    ///
+    /// - This functionality is not supported for directory buckets.
+    /// - This functionality is not supported for Amazon S3 on Outposts.
+    pub fn get_acl(&self) -> &Option<aws_sdk_s3::types::ObjectCannedAcl> {
+        &self.acl
+    }
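To illustrate the `acl`/`set_acl` pair above, here is a minimal usage sketch (not part of the patch); it assumes the builder is obtained through an `UploadRequest::builder()` constructor defined elsewhere in this file:

```rust
use aws_sdk_s3::types::ObjectCannedAcl;

// Sketch only: `UploadRequest::builder()` is assumed to exist elsewhere in this patch.
fn acl_example() {
    // The fluent setter stores Some(value); `set_acl` takes an Option and can clear the field.
    let builder = UploadRequest::builder().acl(ObjectCannedAcl::BucketOwnerFullControl);
    let _builder = builder.set_acl(None); // removes the previously chosen ACL
}
```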

+    /// Object data.
+    pub fn body(mut self, input: crate::io::InputStream) -> Self {
+        self.body = Some(input);
+        self
+    }
+    /// Object data.
+    pub fn set_body(mut self, input: Option<crate::io::InputStream>) -> Self {
+        self.body = input;
+        self
+    }
+
+    /// Object data.
+    pub fn get_body(&self) -> &Option<crate::io::InputStream> {
+        &self.body
+    }
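The body setters take the crate's own `crate::io::InputStream` rather than the SDK's `ByteStream`. A rough sketch, assuming an already-constructed stream (the available `InputStream` constructors live in the crate's `io` module and are not shown in this excerpt):

```rust
// Sketch only: how the InputStream is produced (from a file, from bytes, ...) depends on
// constructors in the crate's io module that are not part of this excerpt.
fn body_example(
    builder: UploadRequestBuilder,
    stream: crate::io::InputStream,
) -> UploadRequestBuilder {
    builder.body(stream)
}
```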

+    /// The bucket name to which the PUT action was initiated.
+    ///
+    /// Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
+    ///
+    /// Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
+    ///
+    /// Access points and Object Lambda access points are not supported by directory buckets.
+    ///
+    /// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
+    /// This field is required.
+    pub fn bucket(mut self, input: impl Into<String>) -> Self {
+        self.bucket = Some(input.into());
+        self
+    }
+    /// The bucket name to which the PUT action was initiated.
+    ///
+    /// Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
+    ///
+    /// Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
+    ///
+    /// Access points and Object Lambda access points are not supported by directory buckets.
+    ///
+    /// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
+    pub fn set_bucket(mut self, input: Option<String>) -> Self {
+        self.bucket = input;
+        self
+    }
+    /// The bucket name to which the PUT action was initiated.
+    ///
+    /// Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.
+    ///
+    /// Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
+    ///
+    /// Access points and Object Lambda access points are not supported by directory buckets.
+    ///
+    /// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
+    pub fn get_bucket(&self) -> &Option<String> {
+        &self.bucket
+    }
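Because `bucket` accepts any `Into<String>`, the same setter covers the addressing styles described above (bucket name, access point alias/ARN, or Outposts access point ARN). A hedged sketch with illustrative values:

```rust
// Sketch only: the bucket name and ARN below are made-up illustrative values.
fn bucket_examples(builder: UploadRequestBuilder) -> UploadRequestBuilder {
    let builder = builder.bucket("amzn-s3-demo-bucket"); // plain bucket name
    // Equivalent form using the Option-taking setter with an access point ARN:
    builder.set_bucket(Some(
        "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point".to_string(),
    ))
}
```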

+    /// Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.
+    pub fn cache_control(mut self, input: impl Into<String>) -> Self {
+        self.cache_control = Some(input.into());
+        self
+    }
+    /// Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.
+    pub fn set_cache_control(mut self, input: Option<String>) -> Self {
+        self.cache_control = input;
+        self
+    }
+    /// Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.
+    pub fn get_cache_control(&self) -> &Option<String> {
+        &self.cache_control
+    }

Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4.

+ pub fn content_disposition(mut self, input: impl Into) -> Self { + self.content_disposition = Some(input.into()); + self + } + ///

Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4.

+ pub fn set_content_disposition(mut self, input: Option) -> Self { + self.content_disposition = input; + self + } + ///

Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4.

+ pub fn get_content_disposition(&self) -> &Option { + &self.content_disposition + } + ///

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding.

+ pub fn content_encoding(mut self, input: impl Into) -> Self { + self.content_encoding = Some(input.into()); + self + } + ///

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding.

+ pub fn set_content_encoding(mut self, input: Option) -> Self { + self.content_encoding = input; + self + } + ///

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding.

+ pub fn get_content_encoding(&self) -> &Option { + &self.content_encoding + } + ///

The language the content is in.

+ pub fn content_language(mut self, input: impl Into) -> Self { + self.content_language = Some(input.into()); + self + } + ///

The language the content is in.

+ pub fn set_content_language(mut self, input: Option) -> Self { + self.content_language = input; + self + } + ///

The language the content is in.

+ pub fn get_content_language(&self) -> &Option { + &self.content_language + } + ///

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.

+ pub fn content_length(mut self, input: i64) -> Self { + self.content_length = Some(input); + self + } + ///

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.

+ pub fn set_content_length(mut self, input: Option) -> Self { + self.content_length = input; + self + } + ///

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.

+ pub fn get_content_length(&self) -> &Option { + &self.content_length + } + ///

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

+ ///

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

+ ///
+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn content_md5(mut self, input: impl Into) -> Self { + self.content_md5 = Some(input.into()); + self + } + ///

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

+ ///

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

+ ///
+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_content_md5(mut self, input: Option) -> Self { + self.content_md5 = input; + self + } + ///

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

+ ///

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

+ ///
+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_content_md5(&self) -> &Option { + &self.content_md5 + } + ///

A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type.

+ pub fn content_type(mut self, input: impl Into) -> Self { + self.content_type = Some(input.into()); + self + } + ///

A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type.

+ pub fn set_content_type(mut self, input: Option) -> Self { + self.content_type = input; + self + } + ///

A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type.

+ pub fn get_content_type(&self) -> &Option { + &self.content_type + } + ///

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

+ ///

For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:

+ ///
    + ///
  • + ///

    CRC32

  • + ///
  • + ///

    CRC32C

  • + ///
  • + ///

    SHA1

  • + ///
  • + ///

    SHA256

  • + ///
+ ///

For more information, see Checking object integrity in the Amazon S3 User Guide.

+ ///

If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm .

+ ///

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

+ ///
+ pub fn checksum_algorithm(mut self, input: aws_sdk_s3::types::ChecksumAlgorithm) -> Self { + self.checksum_algorithm = Some(input); + self + } + ///

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

+ ///

For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:

+ ///
    + ///
  • + ///

    CRC32

  • + ///
  • + ///

    CRC32C

  • + ///
  • + ///

    SHA1

  • + ///
  • + ///

    SHA256

  • + ///
+ ///

For more information, see Checking object integrity in the Amazon S3 User Guide.

+ ///

If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm .

+ ///

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

+ ///
+ pub fn set_checksum_algorithm( + mut self, + input: Option, + ) -> Self { + self.checksum_algorithm = input; + self + } + ///

+    /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.
+    ///
+    /// For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:
+    ///
+    /// - CRC32
+    /// - CRC32C
+    /// - SHA1
+    /// - SHA256
+    ///
+    /// For more information, see Checking object integrity in the Amazon S3 User Guide.
+    ///
+    /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
+    ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    pub fn get_checksum_algorithm(&self) -> &Option<aws_sdk_s3::types::ChecksumAlgorithm> {
+        &self.checksum_algorithm
+    }
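A short sketch of selecting one of the algorithms listed above via the SDK enum (the other variants work the same way):

```rust
use aws_sdk_s3::types::ChecksumAlgorithm;

// Sketch only: per the doc comment above, CRC32 is also the default the SDKs use for directory buckets.
fn checksum_example(builder: UploadRequestBuilder) -> UploadRequestBuilder {
    builder.checksum_algorithm(ChecksumAlgorithm::Crc32)
}
```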

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_crc32(mut self, input: impl Into) -> Self { + self.checksum_crc32 = Some(input.into()); + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn set_checksum_crc32(mut self, input: Option) -> Self { + self.checksum_crc32 = input; + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn get_checksum_crc32(&self) -> &Option { + &self.checksum_crc32 + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_crc32_c(mut self, input: impl Into) -> Self { + self.checksum_crc32_c = Some(input.into()); + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn set_checksum_crc32_c(mut self, input: Option) -> Self { + self.checksum_crc32_c = input; + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn get_checksum_crc32_c(&self) -> &Option { + &self.checksum_crc32_c + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_sha1(mut self, input: impl Into) -> Self { + self.checksum_sha1 = Some(input.into()); + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn set_checksum_sha1(mut self, input: Option) -> Self { + self.checksum_sha1 = input; + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn get_checksum_sha1(&self) -> &Option { + &self.checksum_sha1 + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn checksum_sha256(mut self, input: impl Into) -> Self { + self.checksum_sha256 = Some(input.into()); + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn set_checksum_sha256(mut self, input: Option) -> Self { + self.checksum_sha256 = input; + self + } + ///

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

+ pub fn get_checksum_sha256(&self) -> &Option { + &self.checksum_sha256 + } + ///

The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3.

+ pub fn expires(mut self, input: ::aws_smithy_types::DateTime) -> Self { + self.expires = Some(input); + self + } + ///

The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3.

+ pub fn set_expires(mut self, input: Option<::aws_smithy_types::DateTime>) -> Self { + self.expires = input; + self + } + ///

The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3.

+ pub fn get_expires(&self) -> &Option<::aws_smithy_types::DateTime> { + &self.expires + } + ///

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_full_control(mut self, input: impl Into) -> Self { + self.grant_full_control = Some(input.into()); + self + } + ///

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn set_grant_full_control(mut self, input: Option) -> Self { + self.grant_full_control = input; + self + } + ///

Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn get_grant_full_control(&self) -> &Option { + &self.grant_full_control + } + ///

Allows grantee to read the object data and its metadata.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_read(mut self, input: impl Into) -> Self { + self.grant_read = Some(input.into()); + self + } + ///

Allows grantee to read the object data and its metadata.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn set_grant_read(mut self, input: Option) -> Self { + self.grant_read = input; + self + } + ///

Allows grantee to read the object data and its metadata.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn get_grant_read(&self) -> &Option { + &self.grant_read + } + ///

Allows grantee to read the object ACL.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_read_acp(mut self, input: impl Into) -> Self { + self.grant_read_acp = Some(input.into()); + self + } + ///

Allows grantee to read the object ACL.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn set_grant_read_acp(mut self, input: Option) -> Self { + self.grant_read_acp = input; + self + } + ///

Allows grantee to read the object ACL.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn get_grant_read_acp(&self) -> &Option { + &self.grant_read_acp + } + ///

Allows grantee to write the ACL for the applicable object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn grant_write_acp(mut self, input: impl Into) -> Self { + self.grant_write_acp = Some(input.into()); + self + } + ///

Allows grantee to write the ACL for the applicable object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn set_grant_write_acp(mut self, input: Option) -> Self { + self.grant_write_acp = input; + self + } + ///

Allows grantee to write the ACL for the applicable object.

+ ///
    + ///
  • + ///

    This functionality is not supported for directory buckets.

  • + ///
  • + ///

    This functionality is not supported for Amazon S3 on Outposts.

  • + ///
+ ///
+ pub fn get_grant_write_acp(&self) -> &Option { + &self.grant_write_acp + } + ///

+    /// Object key for which the PUT action was initiated.
+    /// This field is required.
+    pub fn key(mut self, input: impl Into<String>) -> Self {
+        self.key = Some(input.into());
+        self
+    }
+    /// Object key for which the PUT action was initiated.
+    pub fn set_key(mut self, input: Option<String>) -> Self {
+        self.key = input;
+        self
+    }
+    /// Object key for which the PUT action was initiated.
+    pub fn get_key(&self) -> &Option<String> {
+        &self.key
+    }
+    /// Adds a key-value pair to `metadata`.
+    ///
+    /// To override the contents of this collection use [`set_metadata`](Self::set_metadata).
+    ///
+    /// A map of metadata to store with the object in S3.
+    pub fn metadata(mut self, k: impl Into<String>, v: impl Into<String>) -> Self {
+        let mut hash_map = self.metadata.unwrap_or_default();
+        hash_map.insert(k.into(), v.into());
+        self.metadata = Some(hash_map);
+        self
+    }
+    /// A map of metadata to store with the object in S3.
+    pub fn set_metadata(
+        mut self,
+        input: Option<::std::collections::HashMap<String, String>>,
+    ) -> Self {
+        self.metadata = input;
+        self
+    }
+    /// A map of metadata to store with the object in S3.
+    pub fn get_metadata(&self) -> &Option<::std::collections::HashMap<String, String>> {
+        &self.metadata
+    }
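A small sketch contrasting the two ways of populating object metadata shown above: `metadata(k, v)` appends one pair per call, while `set_metadata` replaces the whole map:

```rust
use std::collections::HashMap;

// Sketch only: keys and values are illustrative.
fn metadata_example(builder: UploadRequestBuilder) -> UploadRequestBuilder {
    let builder = builder
        .metadata("uploaded-by", "transfer-manager-example")
        .metadata("content-owner", "data-team");

    // Wholesale replacement of the map:
    let mut map = HashMap::new();
    map.insert("uploaded-by".to_string(), "transfer-manager-example".to_string());
    builder.set_metadata(Some(map))
}
```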

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

+ ///

General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

+ ///

Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

+ pub fn server_side_encryption( + mut self, + input: aws_sdk_s3::types::ServerSideEncryption, + ) -> Self { + self.server_side_encryption = Some(input); + self + } + ///

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

+ ///

General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

+ ///

Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

+ pub fn set_server_side_encryption( + mut self, + input: Option, + ) -> Self { + self.server_side_encryption = input; + self + } + ///

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

+ ///

General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

+ ///

Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

+ pub fn get_server_side_encryption(&self) -> &Option { + &self.server_side_encryption + } + ///

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

+ ///
    + ///
  • + ///

    For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.

  • + ///
  • + ///

    Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.

  • + ///
+ ///
+ pub fn storage_class(mut self, input: aws_sdk_s3::types::StorageClass) -> Self { + self.storage_class = Some(input); + self + } + ///

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

+ ///
    + ///
  • + ///

    For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.

  • + ///
  • + ///

    Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.

  • + ///
+ ///
+ pub fn set_storage_class(mut self, input: Option) -> Self { + self.storage_class = input; + self + } + ///

+    /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
+    ///
+    /// - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects.
+    /// - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
+    pub fn get_storage_class(&self) -> &Option<aws_sdk_s3::types::StorageClass> {
+        &self.storage_class
+    }
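A sketch of overriding the default STANDARD storage class (any `aws_sdk_s3::types::StorageClass` variant can be supplied the same way):

```rust
use aws_sdk_s3::types::StorageClass;

// Sketch only: choose a non-default storage class for the uploaded object.
fn storage_class_example(builder: UploadRequestBuilder) -> UploadRequestBuilder {
    builder.storage_class(StorageClass::IntelligentTiering)
}
```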

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3 User Guide.

+ ///

In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

+ ///

x-amz-website-redirect-location: /anotherPage.html

+ ///

In the following example, the request header sets the object redirect to another website:

+ ///

x-amz-website-redirect-location: http://www.example.com/

+ ///

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn website_redirect_location(mut self, input: impl Into) -> Self { + self.website_redirect_location = Some(input.into()); + self + } + ///

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3 User Guide.

+ ///

In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

+ ///

x-amz-website-redirect-location: /anotherPage.html

+ ///

In the following example, the request header sets the object redirect to another website:

+ ///

x-amz-website-redirect-location: http://www.example.com/

+ ///

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_website_redirect_location(mut self, input: Option) -> Self { + self.website_redirect_location = input; + self + } + ///

If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3 User Guide.

+ ///

In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

+ ///

x-amz-website-redirect-location: /anotherPage.html

+ ///

In the following example, the request header sets the object redirect to another website:

+ ///

x-amz-website-redirect-location: http://www.example.com/

+ ///

For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_website_redirect_location(&self) -> &Option { + &self.website_redirect_location + } + ///

Specifies the algorithm to use when encrypting the object (for example, AES256).

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_customer_algorithm(mut self, input: impl Into) -> Self { + self.sse_customer_algorithm = Some(input.into()); + self + } + ///

Specifies the algorithm to use when encrypting the object (for example, AES256).

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_sse_customer_algorithm(mut self, input: Option) -> Self { + self.sse_customer_algorithm = input; + self + } + ///

Specifies the algorithm to use when encrypting the object (for example, AES256).

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_sse_customer_algorithm(&self) -> &Option { + &self.sse_customer_algorithm + } + ///

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_customer_key(mut self, input: impl Into) -> Self { + self.sse_customer_key = Some(input.into()); + self + } + ///

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_sse_customer_key(mut self, input: Option) -> Self { + self.sse_customer_key = input; + self + } + ///

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_sse_customer_key(&self) -> &Option { + &self.sse_customer_key + } + ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_customer_key_md5(mut self, input: impl Into) -> Self { + self.sse_customer_key_md5 = Some(input.into()); + self + } + ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_sse_customer_key_md5(mut self, input: Option) -> Self { + self.sse_customer_key_md5 = input; + self + } + ///

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_sse_customer_key_md5(&self) -> &Option { + &self.sse_customer_key_md5 + } + ///

If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_kms_key_id(mut self, input: impl Into) -> Self { + self.sse_kms_key_id = Some(input.into()); + self + } + ///

If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_sse_kms_key_id(mut self, input: Option) -> Self { + self.sse_kms_key_id = input; + self + } + ///

If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_sse_kms_key_id(&self) -> &Option { + &self.sse_kms_key_id + } + ///

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn sse_kms_encryption_context(mut self, input: impl Into) -> Self { + self.sse_kms_encryption_context = Some(input.into()); + self + } + ///

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_sse_kms_encryption_context(mut self, input: Option) -> Self { + self.sse_kms_encryption_context = input; + self + } + ///

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_sse_kms_encryption_context(&self) -> &Option { + &self.sse_kms_encryption_context + } + ///

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

+ ///

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn bucket_key_enabled(mut self, input: bool) -> Self { + self.bucket_key_enabled = Some(input); + self + } + ///

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

+ ///

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_bucket_key_enabled(mut self, input: Option) -> Self { + self.bucket_key_enabled = input; + self + } + ///

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

+ ///

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_bucket_key_enabled(&self) -> &Option { + &self.bucket_key_enabled + } + ///

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn request_payer(mut self, input: aws_sdk_s3::types::RequestPayer) -> Self { + self.request_payer = Some(input); + self + } + ///

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_request_payer(mut self, input: Option) -> Self { + self.request_payer = input; + self + } + ///

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_request_payer(&self) -> &Option { + &self.request_payer + } + ///

+    /// The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1")
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn tagging(mut self, input: impl Into<String>) -> Self {
+        self.tagging = Some(input.into());
+        self
+    }
+    /// The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1")
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_tagging(mut self, input: Option<String>) -> Self {
+        self.tagging = input;
+        self
+    }
+    /// The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1")
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_tagging(&self) -> &Option<String> {
+        &self.tagging
+    }
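Since the tag-set is passed as a single URL-query-encoded string, a sketch looks like this (special characters in keys or values would additionally need percent-encoding, which is not shown):

```rust
// Sketch only: two tags encoded as URL query parameters.
fn tagging_example(builder: UploadRequestBuilder) -> UploadRequestBuilder {
    builder.tagging("project=transfer-manager&env=dev")
}
```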

The Object Lock mode that you want to apply to this object.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn object_lock_mode(mut self, input: aws_sdk_s3::types::ObjectLockMode) -> Self { + self.object_lock_mode = Some(input); + self + } + ///

The Object Lock mode that you want to apply to this object.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_object_lock_mode( + mut self, + input: Option, + ) -> Self { + self.object_lock_mode = input; + self + } + ///

The Object Lock mode that you want to apply to this object.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_object_lock_mode(&self) -> &Option { + &self.object_lock_mode + } + ///

The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn object_lock_retain_until_date(mut self, input: ::aws_smithy_types::DateTime) -> Self { + self.object_lock_retain_until_date = Some(input); + self + } + ///

The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_object_lock_retain_until_date( + mut self, + input: Option<::aws_smithy_types::DateTime>, + ) -> Self { + self.object_lock_retain_until_date = input; + self + } + ///

The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_object_lock_retain_until_date(&self) -> &Option<::aws_smithy_types::DateTime> { + &self.object_lock_retain_until_date + } + ///

Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn object_lock_legal_hold_status( + mut self, + input: aws_sdk_s3::types::ObjectLockLegalHoldStatus, + ) -> Self { + self.object_lock_legal_hold_status = Some(input); + self + } + ///

Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn set_object_lock_legal_hold_status( + mut self, + input: Option, + ) -> Self { + self.object_lock_legal_hold_status = input; + self + } + ///

Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock in the Amazon S3 User Guide.

+ ///

This functionality is not supported for directory buckets.

+ ///
+ pub fn get_object_lock_legal_hold_status( + &self, + ) -> &Option { + &self.object_lock_legal_hold_status + } + ///

+    /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).
+    pub fn expected_bucket_owner(mut self, input: impl Into<String>) -> Self {
+        self.expected_bucket_owner = Some(input.into());
+        self
+    }
+    /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).
+    pub fn set_expected_bucket_owner(mut self, input: Option<String>) -> Self {
+        self.expected_bucket_owner = input;
+        self
+    }
+    /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).
+    pub fn get_expected_bucket_owner(&self) -> &Option<String> {
+        &self.expected_bucket_owner
+    }
+
+    /// Consumes the builder and constructs a [`UploadRequest`]
+    // FIXME(aws-sdk-rust#1159): replace BuildError with our own type?
+    pub fn build(self) -> Result<UploadRequest, ::aws_smithy_types::error::operation::BuildError> {
+        Ok(UploadRequest {
+            body: self.body.unwrap_or_default(),
+            acl: self.acl,
+            bucket: self.bucket,
+            cache_control: self.cache_control,
+            content_disposition: self.content_disposition,
+            content_encoding: self.content_encoding,
+            content_language: self.content_language,
+            content_length: self.content_length,
+            content_md5: self.content_md5,
+            content_type: self.content_type,
+            checksum_algorithm: self.checksum_algorithm,
+            checksum_crc32: self.checksum_crc32,
+            checksum_crc32_c: self.checksum_crc32_c,
+            checksum_sha1: self.checksum_sha1,
+            checksum_sha256: self.checksum_sha256,
+            expires: self.expires,
+            grant_full_control: self.grant_full_control,
+            grant_read: self.grant_read,
+            grant_read_acp: self.grant_read_acp,
+            grant_write_acp: self.grant_write_acp,
+            key: self.key,
+            metadata: self.metadata,
+            server_side_encryption: self.server_side_encryption,
+            storage_class: self.storage_class,
+            website_redirect_location: self.website_redirect_location,
+            sse_customer_algorithm: self.sse_customer_algorithm,
+            sse_customer_key: self.sse_customer_key,
+            sse_customer_key_md5: self.sse_customer_key_md5,
+            sse_kms_key_id: self.sse_kms_key_id,
+            sse_kms_encryption_context: self.sse_kms_encryption_context,
+            bucket_key_enabled: self.bucket_key_enabled,
+            request_payer: self.request_payer,
+            tagging: self.tagging,
+            object_lock_mode: self.object_lock_mode,
+            object_lock_retain_until_date: self.object_lock_retain_until_date,
+            object_lock_legal_hold_status: self.object_lock_legal_hold_status,
+            expected_bucket_owner: self.expected_bucket_owner,
+        })
+    }
+}
+
+impl Debug for UploadRequestBuilder {
+    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        let mut formatter = f.debug_struct("UploadRequestBuilder");
+        formatter.field("acl", &self.acl);
+        formatter.field("body", &self.body);
+        formatter.field("bucket", &self.bucket);
+        formatter.field("cache_control", &self.cache_control);
+        formatter.field("content_disposition", &self.content_disposition);
+        formatter.field("content_encoding", &self.content_encoding);
+        formatter.field("content_language", &self.content_language);
+        formatter.field("content_length", &self.content_length);
+        formatter.field("content_md5", &self.content_md5);
+        formatter.field("content_type", &self.content_type);
+        formatter.field("checksum_algorithm", &self.checksum_algorithm);
+        formatter.field("checksum_crc32", &self.checksum_crc32);
+        formatter.field("checksum_crc32_c", &self.checksum_crc32_c);
+        formatter.field("checksum_sha1", &self.checksum_sha1);
+        formatter.field("checksum_sha256", &self.checksum_sha256);
+        formatter.field("expires", &self.expires);
+        formatter.field("grant_full_control", &self.grant_full_control);
+        formatter.field("grant_read", &self.grant_read);
+        formatter.field("grant_read_acp", &self.grant_read_acp);
+        formatter.field("grant_write_acp", &self.grant_write_acp);
+        formatter.field("key", &self.key);
+        formatter.field("metadata", &self.metadata);
+        formatter.field("server_side_encryption", &self.server_side_encryption);
+        formatter.field("storage_class", &self.storage_class);
+        formatter.field("website_redirect_location", &self.website_redirect_location);
+        formatter.field("sse_customer_algorithm", &self.sse_customer_algorithm);
+        formatter.field("sse_customer_key", &"*** Sensitive Data Redacted ***");
+        formatter.field("sse_customer_key_md5", &self.sse_customer_key_md5);
+        formatter.field("sse_kms_key_id", &"*** Sensitive Data Redacted ***");
+        formatter.field(
+            "sse_kms_encryption_context",
+            &"*** Sensitive Data Redacted ***",
+        );
+        formatter.field("bucket_key_enabled", &self.bucket_key_enabled);
+        formatter.field("request_payer", &self.request_payer);
+        formatter.field("tagging", &self.tagging);
+        formatter.field("object_lock_mode", &self.object_lock_mode);
+        formatter.field(
+            "object_lock_retain_until_date",
+            &self.object_lock_retain_until_date,
+        );
+        formatter.field(
+            "object_lock_legal_hold_status",
+            &self.object_lock_legal_hold_status,
+        );
+        formatter.field("expected_bucket_owner", &self.expected_bucket_owner);
+        formatter.finish()
+    }
+}
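For orientation, here is a minimal usage sketch of the request builder above. It is not part of the diff: it assumes the crate re-exports `UploadRequest` from the `upload` module and that `UploadRequest::builder()` returns the `UploadRequestBuilder` shown in this file; the `bucket` and `key` fluent setters are assumed to follow the same generated pattern as the setters above.

```rust
// Hypothetical sketch of driving UploadRequestBuilder; the import path and
// builder() constructor are assumptions, not guaranteed by this diff.
use aws_s3_transfer_manager::upload::UploadRequest; // assumed re-export

fn make_request() -> Result<UploadRequest, Box<dyn std::error::Error>> {
    let request = UploadRequest::builder() // assumed constructor
        .bucket("my-bucket")               // fluent setters in the generated style above
        .key("my-key")
        .expected_bucket_owner("111122223333")
        .build()?;                         // BuildError converts into Box<dyn Error>
    Ok(request)
}
```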
diff --git a/aws-s3-transfer-manager/src/upload/response.rs b/aws-s3-transfer-manager/src/upload/response.rs
new file mode 100644
index 0000000..904f89c
--- /dev/null
+++ b/aws-s3-transfer-manager/src/upload/response.rs
@@ -0,0 +1,565 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+use aws_sdk_s3::operation::create_multipart_upload::CreateMultipartUploadOutput;
+use std::fmt::{Debug, Formatter};
+
+/// Common response fields for uploading an object to Amazon S3
+#[non_exhaustive]
+#[derive(Clone, PartialEq)]
+pub struct UploadResponse {
+    /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub expiration: Option<String>,
+
+    /// Entity tag for the uploaded object.
+    ///
+    /// General purpose buckets - To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+    ///
+    /// Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object.
+    pub e_tag: Option<String>,
+
+    /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub checksum_crc32: Option<String>,
+
+    /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub checksum_crc32_c: Option<String>,
+
+    /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub checksum_sha1: Option<String>,
+
+    /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub checksum_sha256: Option<String>,
+
+    /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+    ///
+    /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+    pub server_side_encryption: Option<aws_sdk_s3::types::ServerSideEncryption>,
+
+    /// Version ID of the object.
+    ///
+    /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub version_id: Option<String>,
+
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub sse_customer_algorithm: Option<String>,
+
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub sse_customer_key_md5: Option<String>,
+
+    /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub sse_kms_key_id: Option<String>,
+
+    /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub sse_kms_encryption_context: Option<String>,
+
+    /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub bucket_key_enabled: Option<bool>,
+
+    /// If present, indicates that the requester was successfully charged for the request.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub request_charged: Option<aws_sdk_s3::types::RequestCharged>,
+
+    /// ID for the initiated multipart upload.
+    /// This will not be set for requests that are not split into multipart uploads.
+    pub upload_id: Option<String>,
+}
+
+impl UploadResponse {
+    /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn expiration(&self) -> Option<&str> {
+        self.expiration.as_deref()
+    }
+    /// Entity tag for the uploaded object.
+    ///
+    /// General purpose buckets - To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+    ///
+    /// Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object.
+    pub fn e_tag(&self) -> Option<&str> {
+        self.e_tag.as_deref()
+    }
+    /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_crc32(&self) -> Option<&str> {
+        self.checksum_crc32.as_deref()
+    }
+    /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_crc32_c(&self) -> Option<&str> {
+        self.checksum_crc32_c.as_deref()
+    }
+    /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_sha1(&self) -> Option<&str> {
+        self.checksum_sha1.as_deref()
+    }
+    /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_sha256(&self) -> Option<&str> {
+        self.checksum_sha256.as_deref()
+    }
+    /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+    ///
+    /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+    pub fn server_side_encryption(&self) -> Option<&aws_sdk_s3::types::ServerSideEncryption> {
+        self.server_side_encryption.as_ref()
+    }
+    /// Version ID of the object.
+    ///
+    /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn version_id(&self) -> Option<&str> {
+        self.version_id.as_deref()
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_customer_algorithm(&self) -> Option<&str> {
+        self.sse_customer_algorithm.as_deref()
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_customer_key_md5(&self) -> Option<&str> {
+        self.sse_customer_key_md5.as_deref()
+    }
+    /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_kms_key_id(&self) -> Option<&str> {
+        self.sse_kms_key_id.as_deref()
+    }
+    /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_kms_encryption_context(&self) -> Option<&str> {
+        self.sse_kms_encryption_context.as_deref()
+    }
+    /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn bucket_key_enabled(&self) -> Option<bool> {
+        self.bucket_key_enabled
+    }
+    /// If present, indicates that the requester was successfully charged for the request.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn request_charged(&self) -> Option<&aws_sdk_s3::types::RequestCharged> {
+        self.request_charged.as_ref()
+    }
+
+    /// ID for the initiated multipart upload.
+    /// This will not be set for requests that are not split into multipart uploads.
+    pub fn upload_id(&self) -> Option<&String> {
+        self.upload_id.as_ref()
+    }
+}
+
+impl Debug for UploadResponse {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        let mut formatter = f.debug_struct("UploadResponse");
+        formatter.field("expiration", &self.expiration);
+        formatter.field("e_tag", &self.e_tag);
+        formatter.field("checksum_crc32", &self.checksum_crc32);
+        formatter.field("checksum_crc32_c", &self.checksum_crc32_c);
+        formatter.field("checksum_sha1", &self.checksum_sha1);
+        formatter.field("checksum_sha256", &self.checksum_sha256);
+        formatter.field("server_side_encryption", &self.server_side_encryption);
+        formatter.field("version_id", &self.version_id);
+        formatter.field("sse_customer_algorithm", &self.sse_customer_algorithm);
+        formatter.field("sse_customer_key_md5", &self.sse_customer_key_md5);
+        formatter.field("sse_kms_key_id", &"*** Sensitive Data Redacted ***");
+        formatter.field(
+            "sse_kms_encryption_context",
+            &"*** Sensitive Data Redacted ***",
+        );
+        formatter.field("bucket_key_enabled", &self.bucket_key_enabled);
+        formatter.field("request_charged", &self.request_charged);
+        formatter.field("upload_id", &self.upload_id);
+        formatter.finish()
+    }
+}
+
+/// A builder for [`UploadResponse`].
+#[non_exhaustive]
+#[derive(Default)]
+pub struct UploadResponseBuilder {
+    pub(crate) expiration: Option<String>,
+    pub(crate) e_tag: Option<String>,
+    pub(crate) checksum_crc32: Option<String>,
+    pub(crate) checksum_crc32_c: Option<String>,
+    pub(crate) checksum_sha1: Option<String>,
+    pub(crate) checksum_sha256: Option<String>,
+    pub(crate) server_side_encryption: Option<aws_sdk_s3::types::ServerSideEncryption>,
+    pub(crate) version_id: Option<String>,
+    pub(crate) sse_customer_algorithm: Option<String>,
+    pub(crate) sse_customer_key_md5: Option<String>,
+    pub(crate) sse_kms_key_id: Option<String>,
+    pub(crate) sse_kms_encryption_context: Option<String>,
+    pub(crate) bucket_key_enabled: Option<bool>,
+    pub(crate) request_charged: Option<aws_sdk_s3::types::RequestCharged>,
+    pub(crate) upload_id: Option<String>,
+}
+
+impl UploadResponseBuilder {
+    /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn expiration(mut self, input: impl Into<String>) -> Self {
+        self.expiration = Some(input.into());
+        self
+    }
+    /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_expiration(mut self, input: Option<String>) -> Self {
+        self.expiration = input;
+        self
+    }
+    /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_expiration(&self) -> &Option<String> {
+        &self.expiration
+    }
+    /// Entity tag for the uploaded object.
+    ///
+    /// General purpose buckets - To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+    ///
+    /// Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object.
+    pub fn e_tag(mut self, input: impl Into<String>) -> Self {
+        self.e_tag = Some(input.into());
+        self
+    }
+    /// Entity tag for the uploaded object.
+    ///
+    /// General purpose buckets - To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+    ///
+    /// Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object.
+    pub fn set_e_tag(mut self, input: Option<String>) -> Self {
+        self.e_tag = input;
+        self
+    }
+    /// Entity tag for the uploaded object.
+    ///
+    /// General purpose buckets - To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+    ///
+    /// Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object.
+    pub fn get_e_tag(&self) -> &Option<String> {
+        &self.e_tag
+    }
+    /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_crc32(mut self, input: impl Into<String>) -> Self {
+        self.checksum_crc32 = Some(input.into());
+        self
+    }
+    /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn set_checksum_crc32(mut self, input: Option<String>) -> Self {
+        self.checksum_crc32 = input;
+        self
+    }
+    /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn get_checksum_crc32(&self) -> &Option<String> {
+        &self.checksum_crc32
+    }
+    /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_crc32_c(mut self, input: impl Into<String>) -> Self {
+        self.checksum_crc32_c = Some(input.into());
+        self
+    }
+    /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn set_checksum_crc32_c(mut self, input: Option<String>) -> Self {
+        self.checksum_crc32_c = input;
+        self
+    }
+    /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn get_checksum_crc32_c(&self) -> &Option<String> {
+        &self.checksum_crc32_c
+    }
+    /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_sha1(mut self, input: impl Into<String>) -> Self {
+        self.checksum_sha1 = Some(input.into());
+        self
+    }
+    /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn set_checksum_sha1(mut self, input: Option<String>) -> Self {
+        self.checksum_sha1 = input;
+        self
+    }
+    /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn get_checksum_sha1(&self) -> &Option<String> {
+        &self.checksum_sha1
+    }
+    /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn checksum_sha256(mut self, input: impl Into<String>) -> Self {
+        self.checksum_sha256 = Some(input.into());
+        self
+    }
+    /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn set_checksum_sha256(mut self, input: Option<String>) -> Self {
+        self.checksum_sha256 = input;
+        self
+    }
+    /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
+    pub fn get_checksum_sha256(&self) -> &Option<String> {
+        &self.checksum_sha256
+    }
+    /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+    ///
+    /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+    pub fn server_side_encryption(
+        mut self,
+        input: aws_sdk_s3::types::ServerSideEncryption,
+    ) -> Self {
+        self.server_side_encryption = Some(input);
+        self
+    }
+    /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+    ///
+    /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+    pub fn set_server_side_encryption(
+        mut self,
+        input: Option<aws_sdk_s3::types::ServerSideEncryption>,
+    ) -> Self {
+        self.server_side_encryption = input;
+        self
+    }
+    /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+    ///
+    /// For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+    pub fn get_server_side_encryption(&self) -> &Option<aws_sdk_s3::types::ServerSideEncryption> {
+        &self.server_side_encryption
+    }
+    /// Version ID of the object.
+    ///
+    /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn version_id(mut self, input: impl Into<String>) -> Self {
+        self.version_id = Some(input.into());
+        self
+    }
+    /// Version ID of the object.
+    ///
+    /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_version_id(mut self, input: Option<String>) -> Self {
+        self.version_id = input;
+        self
+    }
+    /// Version ID of the object.
+    ///
+    /// If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_version_id(&self) -> &Option<String> {
+        &self.version_id
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_customer_algorithm(mut self, input: impl Into<String>) -> Self {
+        self.sse_customer_algorithm = Some(input.into());
+        self
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_sse_customer_algorithm(mut self, input: Option<String>) -> Self {
+        self.sse_customer_algorithm = input;
+        self
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_sse_customer_algorithm(&self) -> &Option<String> {
+        &self.sse_customer_algorithm
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_customer_key_md5(mut self, input: impl Into<String>) -> Self {
+        self.sse_customer_key_md5 = Some(input.into());
+        self
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_sse_customer_key_md5(mut self, input: Option<String>) -> Self {
+        self.sse_customer_key_md5 = input;
+        self
+    }
+    /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_sse_customer_key_md5(&self) -> &Option<String> {
+        &self.sse_customer_key_md5
+    }
+    /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_kms_key_id(mut self, input: impl Into<String>) -> Self {
+        self.sse_kms_key_id = Some(input.into());
+        self
+    }
+    /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_sse_kms_key_id(mut self, input: Option<String>) -> Self {
+        self.sse_kms_key_id = input;
+        self
+    }
+    /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_sse_kms_key_id(&self) -> &Option<String> {
+        &self.sse_kms_key_id
+    }
+    /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn sse_kms_encryption_context(mut self, input: impl Into<String>) -> Self {
+        self.sse_kms_encryption_context = Some(input.into());
+        self
+    }
+    /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_sse_kms_encryption_context(mut self, input: Option<String>) -> Self {
+        self.sse_kms_encryption_context = input;
+        self
+    }
+    /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_sse_kms_encryption_context(&self) -> &Option<String> {
+        &self.sse_kms_encryption_context
+    }
+    /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn bucket_key_enabled(mut self, input: bool) -> Self {
+        self.bucket_key_enabled = Some(input);
+        self
+    }
+    /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_bucket_key_enabled(mut self, input: Option<bool>) -> Self {
+        self.bucket_key_enabled = input;
+        self
+    }
+    /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_bucket_key_enabled(&self) -> &Option<bool> {
+        &self.bucket_key_enabled
+    }
+    /// If present, indicates that the requester was successfully charged for the request.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn request_charged(mut self, input: aws_sdk_s3::types::RequestCharged) -> Self {
+        self.request_charged = Some(input);
+        self
+    }
+    /// If present, indicates that the requester was successfully charged for the request.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn set_request_charged(mut self, input: Option<aws_sdk_s3::types::RequestCharged>) -> Self {
+        self.request_charged = input;
+        self
+    }
+    /// If present, indicates that the requester was successfully charged for the request.
+    ///
+    /// This functionality is not supported for directory buckets.
+    pub fn get_request_charged(&self) -> &Option<aws_sdk_s3::types::RequestCharged> {
+        &self.request_charged
+    }
+
+    /// ID for the initiated multipart upload.
+    pub fn upload_id(mut self, input: impl Into<String>) -> Self {
+        self.upload_id = Some(input.into());
+        self
+    }
+    /// ID for the initiated multipart upload.
+    pub fn set_upload_id(mut self, input: Option<String>) -> Self {
+        self.upload_id = input;
+        self
+    }
+    /// ID for the initiated multipart upload.
+    pub fn get_upload_id(&self) -> &Option<String> {
+        &self.upload_id
+    }
+
+    /// Consumes the builder and constructs a [`UploadResponse`]
+    // FIXME(aws-sdk-rust#1159): replace BuildError with our own type?
+    pub fn build(self) -> Result<UploadResponse, ::aws_smithy_types::error::operation::BuildError> {
+        Ok(UploadResponse {
+            expiration: self.expiration,
+            e_tag: self.e_tag,
+            checksum_crc32: self.checksum_crc32,
+            checksum_crc32_c: self.checksum_crc32_c,
+            checksum_sha1: self.checksum_sha1,
+            checksum_sha256: self.checksum_sha256,
+            server_side_encryption: self.server_side_encryption,
+            version_id: self.version_id,
+            sse_customer_algorithm: self.sse_customer_algorithm,
+            sse_customer_key_md5: self.sse_customer_key_md5,
+            sse_kms_key_id: self.sse_kms_key_id,
+            sse_kms_encryption_context: self.sse_kms_encryption_context,
+            bucket_key_enabled: self.bucket_key_enabled,
+            request_charged: self.request_charged,
+            upload_id: self.upload_id,
+        })
+    }
+}
+
+impl Debug for UploadResponseBuilder {
+    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+        let mut formatter = f.debug_struct("UploadResponseBuilder");
+        formatter.field("expiration", &self.expiration);
+        formatter.field("e_tag", &self.e_tag);
+        formatter.field("checksum_crc32", &self.checksum_crc32);
+        formatter.field("checksum_crc32_c", &self.checksum_crc32_c);
+        formatter.field("checksum_sha1", &self.checksum_sha1);
+        formatter.field("checksum_sha256", &self.checksum_sha256);
+        formatter.field("server_side_encryption", &self.server_side_encryption);
+        formatter.field("version_id", &self.version_id);
+        formatter.field("sse_customer_algorithm", &self.sse_customer_algorithm);
+        formatter.field("sse_customer_key_md5", &self.sse_customer_key_md5);
+        formatter.field("sse_kms_key_id", &"*** Sensitive Data Redacted ***");
+        formatter.field(
+            "sse_kms_encryption_context",
+            &"*** Sensitive Data Redacted ***",
+        );
+        formatter.field("bucket_key_enabled", &self.bucket_key_enabled);
+        formatter.field("request_charged", &self.request_charged);
+        formatter.field("upload_id", &self.upload_id);
+        formatter.finish()
+    }
+}
+
+impl From<CreateMultipartUploadOutput> for UploadResponseBuilder {
+    fn from(value: CreateMultipartUploadOutput) -> Self {
+        UploadResponseBuilder {
+            upload_id: value.upload_id,
+            server_side_encryption: value.server_side_encryption,
+            sse_customer_algorithm: value.sse_customer_algorithm,
+            sse_customer_key_md5: value.sse_customer_key_md5,
+            sse_kms_key_id: value.ssekms_key_id,
+            sse_kms_encryption_context: value.ssekms_encryption_context,
+            bucket_key_enabled: value.bucket_key_enabled,
+            request_charged: value.request_charged,
+            // remaining fields not available from CreateMultipartUploadOutput
+            checksum_sha256: None,
+            expiration: None,
+            e_tag: None,
+            checksum_crc32: None,
+            checksum_crc32_c: None,
+            checksum_sha1: None,
+            version_id: None,
+            // TODO(aws-sdk-rust#1159): abort_rule_id and abort_date seem unique to CreateMultipartUploadOutput
+        }
+    }
+}